ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
16498d21-1d10-4b45-ae7a-9b43a041a5b6 | cpp | tensorflow/tensorflow | memory_space_propagation | third_party/xla/xla/service/memory_space_propagation.cc | third_party/xla/xla/service/memory_space_propagation_test.cc | #include "xla/service/memory_space_propagation.h"
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> MemorySpacePropagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool modified = false;
TF_ASSIGN_OR_RETURN(auto dataflow_analysis,
HloDataflowAnalysis::Run(*module, /*ssa_form=*/false,
/*bitcast_defines_value=*/true));
dataflow_analysis_ = std::move(dataflow_analysis);
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kFusion) {
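// Propagate the memory space of each operand leaf into the matching
// parameter of the fused computation.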
for (int operand_idx = 0;
operand_idx < instruction->fused_parameters().size();
++operand_idx) {
ShapeUtil::ForEachLeafShape(
instruction->operand(operand_idx)->shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
int64_t memory_space = sub_shape.layout().memory_space();
modified |=
Propagate(index, instruction->fused_parameter(operand_idx),
memory_space);
});
}
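// Likewise, propagate the memory space of each output leaf into the
// fusion root.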
ShapeUtil::ForEachLeafShape(
instruction->shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
int64_t memory_space = sub_shape.layout().memory_space();
modified |= Propagate(index, instruction->fused_expression_root(),
memory_space);
});
}
}
}
return modified;
}
bool MemorySpacePropagation::Propagate(ShapeIndexView index,
const HloInstruction* callee_instruction,
int64_t memory_space) const {
bool modified = false;
const HloValue& value = dataflow_analysis_->GetUniqueValueAt(
callee_instruction, ShapeIndex(index));
for (const HloPosition& position : value.positions()) {
HloInstruction* instruction = position.instruction;
Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(),
position.index);
if (shape->layout().memory_space() == memory_space) {
continue;
}
shape->mutable_layout()->set_memory_space(memory_space);
modified = true;
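// For a fusion output, propagate the new memory space into the called
// computation's root.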
if (instruction->opcode() == HloOpcode::kFusion) {
Propagate(position.index, instruction->fused_expression_root(),
memory_space);
}
const HloInstruction* parent_fusion =
instruction->parent()->FusionInstruction();
if (instruction == instruction->parent()->root_instruction() &&
parent_fusion->parent()->IsFusionComputation()) {
Propagate(position.index, parent_fusion, memory_space);
}
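// For a nested fusion parameter, pop one level up and propagate the
// memory space to the corresponding operand of the calling fusion.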
if (instruction->opcode() == HloOpcode::kParameter &&
parent_fusion->parent()->IsFusionComputation()) {
const HloInstruction* fusion_operand =
parent_fusion->operand(instruction->parameter_number());
Propagate(position.index, fusion_operand, memory_space);
}
}
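// Propagate the memory space into every fusion user via the matching
// fused parameter.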
for (const HloUse& use : value.GetUses()) {
if (use.instruction->opcode() == HloOpcode::kFusion) {
modified |= Propagate(
use.operand_index,
use.instruction->fused_parameter(use.operand_number), memory_space);
}
}
return modified;
}
} | #include "xla/service/memory_space_propagation.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class MemorySpacePropagationTest : public HloTestBase {
public:
MemorySpacePropagationTest()
: HloTestBase(),
verifier_(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false) {
}
absl::Status Verify(HloModule* module) {
return verifier_.Run(module).status();
}
private:
HloVerifier verifier_;
};
TEST_F(MemorySpacePropagationTest, NoMemorySpace) {
absl::string_view hlo_string = R"(
HloModule NoMemorySpace
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)} copy(%param2)
%fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_FALSE(memory_space_propagation.Run(module.get()).value());
TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NonTupleOutput) {
absl::string_view hlo_string = R"(
HloModule NonTupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NonTupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, TupleOutput) {
absl::string_view hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
%gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
%gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
%gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
%gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NestedInputFusion) {
absl::string_view hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[3,2]{0,1:T(128)} parameter(0)
ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[3,2]{0,1:T(128)} parameter(0)
%fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
}
ENTRY %entry {
%param0 = s32[3,2]{0,1:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0)
ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0)
%fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
}
ENTRY %entry {
%param0 = s32[3,2]{0,1:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NestedOutputFusion) {
absl::string_view hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[6]{0:T(128)} parameter(0)
ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[6]{0:T(128)} parameter(0)
ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, BitcastInFusion) {
absl::string_view hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
}
)";
absl::string_view expected_hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7a66d7d-dadc-40c4-8d3c-c32222c567aa | cpp | tensorflow/tensorflow | xla_debug_info_manager | third_party/xla/xla/service/xla_debug_info_manager.cc | third_party/xla/xla/service/xla_debug_info_manager_test.cc | #include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_proto_util.h"
namespace xla {
void XlaDebugInfoManager::RegisterModule(
std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
CHECK(hlo_module != nullptr);
absl::MutexLock lock(&mutex_);
auto result = modules_.try_emplace(hlo_module->unique_id());
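// Each module may be registered at most once; unique_id() identifies it.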
CHECK(result.second);
XlaModuleEntry& m = result.first->second;
m.hlo_module = std::move(hlo_module);
m.buffer_assignment = std::move(buffer_assignment);
m.active = true;
}
void XlaDebugInfoManager::UnregisterModule(ModuleIdentifier module_id) {
absl::MutexLock lock(&mutex_);
auto it = modules_.find(module_id);
CHECK(it != modules_.end());
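// While tracing is active, keep the entry alive but mark it inactive so
// it can still be serialized when tracing stops.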
if (!tracing_active_) {
modules_.erase(it);
} else {
XlaModuleEntry& m = it->second;
m.active = false;
}
}
void XlaDebugInfoManager::StartTracing() {
absl::MutexLock lock(&mutex_);
tracing_active_ = true;
}
void XlaDebugInfoManager::StopTracing(
std::vector<std::unique_ptr<HloProto>>* module_debug_info) {
std::vector<XlaModuleEntry> modules_to_serialize;
{
absl::MutexLock lock(&mutex_);
if (!tracing_active_) return;
tracing_active_ = false;
modules_to_serialize.reserve(modules_.size());
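// Move out entries that were unregistered during tracing and copy the
// still-active ones, so serialization below happens outside the lock.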
for (auto it = modules_.begin(); it != modules_.end();) {
auto& m = it->second;
auto cur_it = it++;
if (!m.active) {
modules_to_serialize.emplace_back(std::move(m));
modules_.erase(cur_it);
} else {
modules_to_serialize.emplace_back(m);
}
}
}
if (module_debug_info) {
module_debug_info->clear();
for (const auto& m : modules_to_serialize) {
auto hlo_proto = std::make_unique<HloProto>(MakeHloProto(*m.hlo_module));
*hlo_proto->mutable_buffer_assignment() = m.buffer_assignment;
module_debug_info->emplace_back(std::move(hlo_proto));
}
}
}
bool XlaDebugInfoManager::TracksModule(ModuleIdentifier module_id) const {
absl::MutexLock lock(&mutex_);
return modules_.find(module_id) != modules_.end();
}
} | #include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
class XlaDebugInfoManagerTestPeer {
public:
void RegisterModule(std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
return xla_debug_info_manager_.RegisterModule(hlo_module,
std::move(buffer_assignment));
}
void UnregisterModule(ModuleIdentifier module_id) {
return xla_debug_info_manager_.UnregisterModule(module_id);
}
void StartTracing() { return xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTracing() {
std::vector<std::unique_ptr<HloProto>> module_debug_info;
xla_debug_info_manager_.StopTracing(&module_debug_info);
absl::flat_hash_set<ModuleIdentifier> module_ids;
for (const auto& hlo_proto : module_debug_info) {
module_ids.insert(hlo_proto->hlo_module().id());
}
return module_ids;
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
absl::flat_hash_set<ModuleIdentifier> module_ids;
absl::MutexLock lock(&xla_debug_info_manager_.mutex_);
for (const auto& it : xla_debug_info_manager_.modules_) {
module_ids.insert(it.first);
}
return module_ids;
}
private:
XlaDebugInfoManager xla_debug_info_manager_;
};
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
class XlaDebugInfoManagerTest : public HloTestBase {
protected:
struct DebugMetadata {
ModuleIdentifier unique_id;
std::shared_ptr<HloModule> module;
};
ModuleIdentifier RegisterProgram(const std::string& module_name) {
DebugMetadata debug_info;
HloModuleConfig config;
debug_info.module = std::make_shared<HloModule>(module_name, config);
ModuleIdentifier unique_id = debug_info.module->unique_id();
debug_info.unique_id = unique_id;
xla_debug_info_manager_.RegisterModule(debug_info.module,
BufferAssignmentProto());
external_references_.push_back(std::move(debug_info));
return unique_id;
}
void UnregisterProgram(ModuleIdentifier unique_id) {
for (int i = 0; i < external_references_.size(); i++) {
if (external_references_[i].unique_id == unique_id) {
xla_debug_info_manager_.UnregisterModule(unique_id);
external_references_.erase(external_references_.begin() + i);
break;
}
}
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
return xla_debug_info_manager_.GetModuleIds();
}
void StartTrace() { xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTrace() {
return xla_debug_info_manager_.StopTracing();
}
std::vector<DebugMetadata> external_references_;
XlaDebugInfoManagerTestPeer xla_debug_info_manager_;
};
TEST_F(XlaDebugInfoManagerTest, NoTraceBasic) {
auto program0 = RegisterProgram("program0");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0));
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0, program1));
UnregisterProgram(program0);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_TRUE(GetModuleIds().empty());
}
TEST_F(XlaDebugInfoManagerTest, NoTraceDuplicateIds) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A, program0B));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, ActiveTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
auto program2 = RegisterProgram("program2");
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
StartTrace();
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
UnregisterProgram(program2);
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B, program1));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, UnregisterDuringTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
UnregisterProgram(program1);
UnregisterProgram(program0B);
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1));
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A));
UnregisterProgram(program0A);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04ba64e2-bf21-4b89-ab30-0f9608f75f9e | cpp | tensorflow/tensorflow | convert_operand_folding | third_party/xla/xla/service/convert_operand_folding.cc | third_party/xla/xla/service/convert_operand_folding_test.cc | #include "xla/service/convert_operand_folding.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
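// Returns true if `hlo` is a value-preserving (upcast) convert, possibly
// seen through ops that only move data around without changing element
// values.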
bool IsUpcastConvert(const HloInstruction* hlo) {
if (!hlo->shape().IsArray()) {
return false;
}
switch (hlo->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose: {
return IsUpcastConvert(hlo->operand(0));
}
case HloOpcode::kReduce: {
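// A reduce that preserves the element count only reduces degenerate
// (size-1) dimensions and is treated as a reshape below.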
if (ShapeUtil::ElementsIn(hlo->shape()) ==
ShapeUtil::ElementsIn(hlo->operand(0)->shape())) {
return IsUpcastConvert(hlo->operand(0));
}
return false;
}
case HloOpcode::kConvert:
return primitive_util::CastPreservesValues(
hlo->operand(0)->shape().element_type(), hlo->shape().element_type());
default:
return false;
}
}
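// Recreates the data-movement ops above the convert on the narrower
// operand type, so the consumer reads the pre-convert values directly.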
HloInstruction* EffectiveOperand(HloInstruction* hlo) {
switch (hlo->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose: {
HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
HloInstruction* clone = hlo->AddInstruction(hlo->Clone());
*(clone->mutable_shape()) = ShapeUtil::ChangeElementType(
clone->shape(), operand->shape().element_type());
clone->ReplaceOperandWithDifferentShape(0, operand).IgnoreError();
return clone;
}
case HloOpcode::kReduce: {
HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
return hlo->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::ChangeElementType(hlo->shape(),
operand->shape().element_type()),
operand));
}
case HloOpcode::kConvert:
return hlo->mutable_operand(0);
default:
return nullptr;
}
}
}
bool ConvertOperandFolding::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kDot &&
instruction->opcode() != HloOpcode::kConvolution) {
return false;
}
for (auto* operand : instruction->operands()) {
if (IsUpcastConvert(operand)) {
return true;
}
}
return false;
}
absl::StatusOr<HloInstruction*> ConvertOperandFolding::ExpandInstruction(
HloInstruction* instruction) {
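// Replace each upcast-convert operand with its pre-convert equivalent;
// the dot/convolution then consumes the narrower type directly.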
for (int i = 0; i < instruction->operand_count(); ++i) {
auto* operand = instruction->mutable_operand(i);
if (IsUpcastConvert(operand)) {
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
i, EffectiveOperand(operand)));
}
}
return nullptr;
}
} | #include "xla/service/convert_operand_folding.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ConvertOperandFoldingTest = HloTestBase;
TEST_F(ConvertOperandFoldingTest, IntegralUpcastConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s16[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, FloatingUpcastConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f16[2,3]{1,0} parameter(0)
p1 = bf16[3,2]{0,1} parameter(1)
c0 = f32[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, IntegralToFloatingConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = f16[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, DowncastConvertNotFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s32[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_FALSE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(
op::Dot(
AllOf(op::Convert(op::Parameter(0)), op::Shape("s16[2,3]{1,0}")),
AllOf(op::Convert(op::Parameter(1)), op::Shape("s8[3,2]{0,1}"))),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, OneOperandFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), AllOf(op::Convert(op::Parameter(1)),
op::Shape("s8[3,2]{0,1}"))),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, FoldedWithFormatting) {
absl::string_view module_string = R"(
HloModule module
sum {
a = s16[] parameter(0)
b = s16[] parameter(1)
ROOT r = add(a,b)
}
ENTRY main {
p0 = s8[3,10] parameter(0)
c0 = s16[3,10] convert(p0)
r0 = s16[3,2,5] reshape(c0)
t0 = s16[2,5,3] transpose(r0), dimensions={1,2,0}
s0 = s16[2,1,3] slice(t0), slice={[0:2], [2:3], [0:3]}
rs0 = s16[2,3] reshape(s0)
p1 = s8[3,1,2] parameter(1)
c1 = s16[3,1,2] convert(p1)
r1 = s16[1,3,2] transpose(c1), dimensions={1,0,2}
z = s16[] constant(0)
rr1 = s16[3,2] reduce(r1,z), dimensions={0}, to_apply=sum
ROOT dot = s16[2,2] dot(rs0, rr1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(
op::Reshape(op::Slice(op::Transpose(op::Reshape(op::Parameter(0))))),
op::Reshape(op::Transpose(op::Parameter(1)))));
}
TEST_F(ConvertOperandFoldingTest, FoldedWithDSAndGather) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[100,3] parameter(0)
c0 = s16[100,3] convert(p0)
ids = s32[20] parameter(2)
g = s16[20,3] gather(c0, ids), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3}
t = s16[3,20] transpose(g), dimensions={1,0}
p1 = s8[25,3] parameter(1)
c1 = s16[25,3] convert(p1)
z = s32[] constant(0)
s = s32[] parameter(3)
ds = s16[20,3] dynamic-slice(c1, s, z), dynamic_slice_sizes={20,3}
ROOT dot = s16[3,3] dot(t, ds), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(op::Transpose(op::Gather(op::Parameter(0), op::Parameter(2))),
op::DynamicSlice(op::Parameter(1), op::Parameter(3),
op::Constant())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31d6bc83-735f-44e1-8e06-a62edd6d24b3 | cpp | tensorflow/tensorflow | convert_memory_placement_to_internal_annotations | third_party/xla/xla/service/convert_memory_placement_to_internal_annotations.cc | third_party/xla/xla/service/convert_memory_placement_to_internal_annotations_test.cc | #include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> ConvertMemoryPlacementToInternalAnnotations::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* c : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : c->MakeInstructionPostOrder()) {
if (instruction->IsCustomCall(
host_memory_offload_annotations::kDevicePlacement)) {
const auto& frontend_attributes = instruction->frontend_attributes();
const auto it = frontend_attributes.map().find(kXlaBufferPlacementAttr);
if (it == frontend_attributes.map().end()) {
continue;
}
const bool is_to_host_case =
(it->second ==
host_memory_offload_annotations::kMemoryTargetPinnedHost ||
it->second ==
host_memory_offload_annotations::kMemoryTargetUnpinnedHost);
const bool is_to_device_case =
(it->second ==
host_memory_offload_annotations::kMemoryTargetDevice);
if (!is_to_host_case && !is_to_device_case) {
continue;
}
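// Rewrite the public annotation custom call into the internal
// MoveToHost / MoveToDevice custom-call targets.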
if (is_to_host_case) {
VLOG(1) << "Process forward case: " << instruction->ToString();
if (instruction->operand_count() != 1) {
return Internal(
"Custom calls with target %s must have exactly one operand. %s "
"has %d.",
host_memory_offload_annotations::kDevicePlacement,
instruction->name(), instruction->operand_count());
}
HloInstruction* input = instruction->mutable_operand(0);
HloInstruction* move_to_host_custom_call =
c->AddInstruction(HloInstruction::CreateCustomCall(
input->shape(), {input},
host_memory_offload_annotations::
kMoveToHostCustomCallTarget));
if (instruction->has_sharding()) {
move_to_host_custom_call->set_sharding(instruction->sharding());
}
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(move_to_host_custom_call));
TF_RETURN_IF_ERROR(
c->RemoveInstructionAndUnusedOperands(instruction));
changed = true;
} else if (is_to_device_case) {
VLOG(1) << "Process backward case: " << instruction->ToString();
HloInstruction* custom_call_operand = instruction->mutable_operand(0);
HloInstruction* new_result =
c->AddInstruction(HloInstruction::CreateCustomCall(
custom_call_operand->shape(), {custom_call_operand},
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget));
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_result));
TF_RETURN_IF_ERROR(
c->RemoveInstructionAndUnusedOperands(instruction));
changed = true;
}
}
}
}
return changed;
}
} | #include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ConvertMemoryPlacementToInternalAnnotationsTest : public HloTestBase {
public:
ConvertMemoryPlacementToInternalAnnotationsTest() = default;
};
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest, ConvertPinnedHostTest) {
const char* hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
region_0.9 {
arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0
constant.15 = s32[] constant(1)
add.33 = s32[] add(get-tuple-element.11, constant.15)
get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1
sine.18 = f32[16]{0} sine(get-tuple-element.12)
sine.19 = f32[16]{0} sine(sine.18)
sine.20 = f32[16]{0} sine(sine.19)
get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2
custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
reshape.23 = f32[1,16]{1,0} reshape(custom-call.21)
constant.17 = s32[] constant(0)
compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
constant.16 = s32[] constant(16)
add.25 = s32[] add(get-tuple-element.11, constant.16)
select.26 = s32[] select(compare.24, add.25, get-tuple-element.11)
dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17)
get-tuple-element.14 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=3
custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
reshape.28 = f32[1,16]{1,0} reshape(custom-call.22)
compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
add.30 = s32[] add(get-tuple-element.11, constant.16)
select.31 = s32[] select(compare.29, add.30, get-tuple-element.11)
dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17)
ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32)
}
region_1.35 {
arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1
get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2
get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3
get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0
constant.41 = s32[] constant(16)
ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT
}
core_closed_call.43 {
constant.47 = s32[] constant(0)
Arg_0.44 = f32[16]{0} parameter(0)
constant.45 = f32[] constant(0)
broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={}
tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46)
while.49 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9
get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0
get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1
get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2
get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3
ROOT tuple.54 = (f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53)
}
region_2.65 {
arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0
constant.74 = s32[] constant(1)
add.108 = s32[] add(get-tuple-element.67, constant.74)
get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6
constant.76 = s32[] constant(0)
compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
constant.75 = s32[] constant(16)
add.83 = s32[] add(get-tuple-element.67, constant.75)
select.84 = s32[] select(compare.82, add.83, get-tuple-element.67)
dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16}
reshape.86 = f32[16]{0} reshape(dynamic-slice.85)
custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2
get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1
cosine.88 = f32[16]{0} cosine(get-tuple-element.68)
reshape.93 = f32[1,16]{1,0} reshape(cosine.88)
compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.95 = s32[] add(get-tuple-element.67, constant.75)
select.96 = s32[] select(compare.94, add.95, get-tuple-element.67)
dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, reshape.93, select.96, constant.76)
get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3
sine.89 = f32[16]{0} sine(get-tuple-element.68)
cosine.90 = f32[16]{0} cosine(sine.89)
reshape.98 = f32[1,16]{1,0} reshape(cosine.90)
compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.100 = s32[] add(get-tuple-element.67, constant.75)
select.101 = s32[] select(compare.99, add.100, get-tuple-element.67)
dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76)
get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4
get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5
compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.78 = s32[] add(get-tuple-element.67, constant.75)
select.79 = s32[] select(compare.77, add.78, get-tuple-element.67)
dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16}
reshape.81 = f32[16]{0} reshape(dynamic-slice.80)
custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
cosine.92 = f32[16]{0} cosine(custom-call.91)
reshape.103 = f32[1,16]{1,0} reshape(cosine.92)
compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.105 = s32[] add(get-tuple-element.67, constant.75)
select.106 = s32[] select(compare.104, add.105, get-tuple-element.67)
dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76)
ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73)
}
region_3.110 {
arg_tuple.111 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1
get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2
get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3
get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=4
get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5
get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6
get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0
constant.119 = s32[] constant(16)
ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT
}
region_4.130 {
arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0
constant.140 = s32[] constant(1)
add.164 = s32[] add(get-tuple-element.132, constant.140)
get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1
get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2
broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={}
add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159)
get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5
constant.141 = s32[] constant(16)
subtract.142 = s32[] subtract(constant.141, get-tuple-element.132)
subtract.143 = s32[] subtract(subtract.142, constant.140)
constant.139 = s32[] constant(0)
compare.154 = pred[] compare(subtract.143, constant.139), direction=LT
add.155 = s32[] add(subtract.143, constant.141)
select.156 = s32[] select(compare.154, add.155, subtract.143)
dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16}
reshape.158 = f32[16]{0} reshape(dynamic-slice.157)
multiply.161 = f32[16]{0} multiply(add.160, reshape.158)
get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4
compare.149 = pred[] compare(subtract.143, constant.139), direction=LT
add.150 = s32[] add(subtract.143, constant.141)
select.151 = s32[] select(compare.149, add.150, subtract.143)
dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), dynamic_slice_sizes={1,16}
reshape.153 = f32[16]{0} reshape(dynamic-slice.152)
multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153)
get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3
compare.144 = pred[] compare(subtract.143, constant.139), direction=LT
add.145 = s32[] add(subtract.143, constant.141)
select.146 = s32[] select(compare.144, add.145, subtract.143)
dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16}
reshape.148 = f32[16]{0} reshape(dynamic-slice.147)
multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148)
constant.138 = f32[] constant(0)
ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137)
}
region_5.166 {
arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1
get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2
get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3
get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4
get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5
get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0
constant.174 = s32[] constant(16)
ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), direction=LT
}
ENTRY main.183 {
constant.6 = s32[] constant(0)
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43
get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0
get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1
constant.7 = f32[] constant(1)
tuple.58 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7)
opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58)
get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2
constant.4 = f32[] constant(0)
broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={}
get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0
get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1
tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61)
while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65
get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0
get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1
get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5
get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6
constant.2 = f32[] constant(0)
broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={}
get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3
get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2
get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3
get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4
tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126)
while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), condition=region_5.166, body=region_4.130
get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0
ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1
get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2
get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3
get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4
get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
bool changed =
ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
int64_t custom_calls_count = 0;
for (auto* c : module->computations()) {
for (auto* instr : c->instructions()) {
if (instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
++custom_calls_count;
}
}
}
EXPECT_EQ(custom_calls_count, 4);
}
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest,
ConvertUnpinnedHostTest) {
const char* hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
region_0.9 {
arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0
constant.15 = s32[] constant(1)
add.33 = s32[] add(get-tuple-element.11, constant.15)
get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1
sine.18 = f32[16]{0} sine(get-tuple-element.12)
sine.19 = f32[16]{0} sine(sine.18)
sine.20 = f32[16]{0} sine(sine.19)
get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2
custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"}
reshape.23 = f32[1,16]{1,0} reshape(custom-call.21)
constant.17 = s32[] constant(0)
compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
constant.16 = s32[] constant(16)
add.25 = s32[] add(get-tuple-element.11, constant.16)
select.26 = s32[] select(compare.24, add.25, get-tuple-element.11)
dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17)
get-tuple-element.14 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=3
custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"}
reshape.28 = f32[1,16]{1,0} reshape(custom-call.22)
compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
add.30 = s32[] add(get-tuple-element.11, constant.16)
select.31 = s32[] select(compare.29, add.30, get-tuple-element.11)
dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17)
ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32)
}
region_1.35 {
arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1
get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2
get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3
get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0
constant.41 = s32[] constant(16)
ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT
}
core_closed_call.43 {
constant.47 = s32[] constant(0)
Arg_0.44 = f32[16]{0} parameter(0)
constant.45 = f32[] constant(0)
broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={}
tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46)
while.49 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9
get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0
get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1
get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2
get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3
ROOT tuple.54 = (f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53)
}
region_2.65 {
arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0
constant.74 = s32[] constant(1)
add.108 = s32[] add(get-tuple-element.67, constant.74)
get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6
constant.76 = s32[] constant(0)
compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
constant.75 = s32[] constant(16)
add.83 = s32[] add(get-tuple-element.67, constant.75)
select.84 = s32[] select(compare.82, add.83, get-tuple-element.67)
dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16}
reshape.86 = f32[16]{0} reshape(dynamic-slice.85)
custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2
get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1
cosine.88 = f32[16]{0} cosine(get-tuple-element.68)
reshape.93 = f32[1,16]{1,0} reshape(cosine.88)
compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.95 = s32[] add(get-tuple-element.67, constant.75)
select.96 = s32[] select(compare.94, add.95, get-tuple-element.67)
dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, reshape.93, select.96, constant.76)
get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3
sine.89 = f32[16]{0} sine(get-tuple-element.68)
cosine.90 = f32[16]{0} cosine(sine.89)
reshape.98 = f32[1,16]{1,0} reshape(cosine.90)
compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.100 = s32[] add(get-tuple-element.67, constant.75)
select.101 = s32[] select(compare.99, add.100, get-tuple-element.67)
dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76)
get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4
get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5
compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.78 = s32[] add(get-tuple-element.67, constant.75)
select.79 = s32[] select(compare.77, add.78, get-tuple-element.67)
dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16}
reshape.81 = f32[16]{0} reshape(dynamic-slice.80)
custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
cosine.92 = f32[16]{0} cosine(custom-call.91)
reshape.103 = f32[1,16]{1,0} reshape(cosine.92)
compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.105 = s32[] add(get-tuple-element.67, constant.75)
select.106 = s32[] select(compare.104, add.105, get-tuple-element.67)
dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76)
ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73)
}
region_3.110 {
arg_tuple.111 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1
get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2
get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3
get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=4
get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5
get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6
get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0
constant.119 = s32[] constant(16)
ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT
}
region_4.130 {
arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0
constant.140 = s32[] constant(1)
add.164 = s32[] add(get-tuple-element.132, constant.140)
get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1
get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2
broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={}
add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159)
get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5
constant.141 = s32[] constant(16)
subtract.142 = s32[] subtract(constant.141, get-tuple-element.132)
subtract.143 = s32[] subtract(subtract.142, constant.140)
constant.139 = s32[] constant(0)
compare.154 = pred[] compare(subtract.143, constant.139), direction=LT
add.155 = s32[] add(subtract.143, constant.141)
select.156 = s32[] select(compare.154, add.155, subtract.143)
dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16}
reshape.158 = f32[16]{0} reshape(dynamic-slice.157)
multiply.161 = f32[16]{0} multiply(add.160, reshape.158)
get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4
compare.149 = pred[] compare(subtract.143, constant.139), direction=LT
add.150 = s32[] add(subtract.143, constant.141)
select.151 = s32[] select(compare.149, add.150, subtract.143)
dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), dynamic_slice_sizes={1,16}
reshape.153 = f32[16]{0} reshape(dynamic-slice.152)
multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153)
get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3
compare.144 = pred[] compare(subtract.143, constant.139), direction=LT
add.145 = s32[] add(subtract.143, constant.141)
select.146 = s32[] select(compare.144, add.145, subtract.143)
dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16}
reshape.148 = f32[16]{0} reshape(dynamic-slice.147)
multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148)
constant.138 = f32[] constant(0)
ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137)
}
region_5.166 {
arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1
get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2
get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3
get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4
get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5
get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0
constant.174 = s32[] constant(16)
ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), direction=LT
}
ENTRY main.183 {
constant.6 = s32[] constant(0)
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43
get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0
get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1
constant.7 = f32[] constant(1)
tuple.58 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7)
opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58)
get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2
constant.4 = f32[] constant(0)
broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={}
get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0
get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1
tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61)
while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65
get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0
get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1
get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5
get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6
constant.2 = f32[] constant(0)
broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={}
get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3
get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2
get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3
get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4
tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126)
while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), condition=region_5.166, body=region_4.130
get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0
ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1
get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2
get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3
get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4
get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
bool changed =
ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
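  // The HLO above contains two "unpinned_host" and two "device" placement
  // annotations; each should now be a MoveToHost/MoveToDevice custom call.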
int64_t custom_calls_count = 0;
for (auto* c : module->computations()) {
for (auto* instr : c->instructions()) {
if (instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
++custom_calls_count;
}
}
}
EXPECT_EQ(custom_calls_count, 4);
}
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest,
ConvertOutputPinnedHostTest) {
constexpr std::string_view hlo_string = R"(
HloModule m, entry_computation_layout={(f32[2,2]{1,0:T(2,128)},f32[2,2]{1,0:T(2,128)})->f32[2,2]{1,0:T(2,128)S(5)}}
ENTRY m {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
crs = f32[2,2] add(x, y)
ROOT transfer = f32[2,2] custom-call(crs), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
bool changed =
ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
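  // A "pinned_host" annotation on the entry output should be rewritten into
  // exactly one MoveToHost custom call.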
int64_t move_to_host_count = 0;
for (auto* c : module->computations()) {
for (auto* instr : c->instructions()) {
move_to_host_count += instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget);
}
}
EXPECT_EQ(move_to_host_count, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_memory_placement_to_internal_annotations.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_memory_placement_to_internal_annotations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce41f9f8-1f4d-4324-9dbd-5d7e74c1ec3e | cpp | tensorflow/tensorflow | gpu_compilation_environment | third_party/xla/xla/service/gpu_compilation_environment.cc | third_party/xla/xla/service/gpu_compilation_environment_test.cc | #include "xla/service/gpu_compilation_environment.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
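// Appends a tsl::Flag for each supported field to `flag_list`, wired so that
// parsed values are written directly into `gpu_comp_env`.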
void InitializeFlagsForGpuCompEnv(std::vector<tsl::Flag>* flag_list,
GpuCompilationEnvironment* gpu_comp_env) {
auto int64_setter_for =
[gpu_comp_env](
void (GpuCompilationEnvironment::*member_setter)(int64_t)) {
return [gpu_comp_env, member_setter](int64_t value) {
(gpu_comp_env->*member_setter)(value);
return true;
};
};
flag_list->push_back(tsl::Flag(
"dummy_flag",
int64_setter_for(&GpuCompilationEnvironment::set_dummy_flag),
gpu_comp_env->dummy_flag(), "Dummy flag to demonstrate the flow"));
}
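// Parses command-line style strings such as "--dummy_flag=2" into a
// GpuCompilationEnvironment. Recognized flags are consumed from `flags`; with
// `strict` set, any leftover flag fails the whole call.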
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromFlagStrings(
std::vector<std::string>& flags, bool strict) {
GpuCompilationEnvironment gpu_comp_env;
std::vector<tsl::Flag> flag_objects;
InitializeFlagsForGpuCompEnv(&flag_objects, &gpu_comp_env);
bool result = tsl::Flags::Parse(flags, flag_objects);
if (!result || (strict && !flags.empty())) {
return InvalidArgument("Could not parse flags: %s",
absl::StrJoin(flags, ", "));
}
return gpu_comp_env;
}
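// Builds an environment from the XLA_FLAGS environment variable, ignoring
// flags that do not belong to GpuCompilationEnvironment.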
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromEnvVar() {
GpuCompilationEnvironment env;
std::vector<tsl::Flag> flag_objects;
InitializeFlagsForGpuCompEnv(&flag_objects, &env);
ParseFlagsFromEnvAndIgnoreUnknown("XLA_FLAGS", flag_objects);
return env;
}
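// Returns an environment with every field explicitly set to its default value.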
GpuCompilationEnvironment CreateGpuCompEnvWithDefaultValues() {
GpuCompilationEnvironment env;
env.set_dummy_flag(1);
return env;
}
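// Fills in fields of `env` that the user left unset: values come from
// XLA_FLAGS when present, otherwise from the defaults. Setting the same field
// in both `env` and XLA_FLAGS is an error.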
absl::Status InitializeMissingFieldsFromXLAFlags(
GpuCompilationEnvironment& env) {
TF_ASSIGN_OR_RETURN(GpuCompilationEnvironment from_env,
CreateGpuCompEnvFromEnvVar());
auto default_env = CreateGpuCompEnvWithDefaultValues();
auto reflection = env.GetReflection();
auto reflection_from_env = from_env.GetReflection();
auto descriptor = GpuCompilationEnvironment::descriptor();
std::vector<const tsl::protobuf::FieldDescriptor*> missing_fields;
for (int j = 0; j < descriptor->field_count(); ++j) {
const tsl::protobuf::FieldDescriptor* field = descriptor->field(j);
if (reflection->HasField(env, field) &&
reflection_from_env->HasField(from_env, field)) {
return InvalidArgument(
"Flag %s is set in both XLA_FLAGS env var and "
"GpuCompilationEnvironment.",
field->name());
} else if (!reflection->HasField(env, field) &&
!reflection_from_env->HasField(from_env, field)) {
missing_fields.push_back(field);
}
}
env.MergeFrom(from_env);
if (!missing_fields.empty()) {
reflection->SwapFields(&env, &default_env, missing_fields);
}
return absl::OkStatus();
}
namespace {
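// Factory registered with CompilationEnvironments; supplies a default-valued
// proto when the caller did not provide an environment.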
absl::StatusOr<std::unique_ptr<tsl::protobuf::Message>>
ProcessNewGpuCompilationEnvironment(
std::unique_ptr<tsl::protobuf::Message> env) {
if (!env) {
env = std::make_unique<GpuCompilationEnvironment>();
}
return env;
}
}
}
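// Registers the factory above at static-initialization time.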
static bool InitModule() {
xla::CompilationEnvironments::RegisterProcessNewEnvFn(
xla::GpuCompilationEnvironment::descriptor(),
xla::ProcessNewGpuCompilationEnvironment);
return true;
}
static bool module_initialized = InitModule(); | #include "xla/service/gpu_compilation_environment.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
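// Resets flag state cached from any previous parse and points XLA_FLAGS at
// `xla_flags` for the current test.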
void set_xla_flags_env_var(const std::string& xla_flags) {
int* pargc;
std::vector<char*>* pargv;
ResetFlagsFromEnvForTesting("XLA_FLAGS", &pargc, &pargv);
tsl::setenv("XLA_FLAGS", xla_flags.c_str(), true );
}
TEST(CreateGpuCompEnvFromFlagStringsTest, ValidFlags) {
std::vector<std::string> flags = {"--dummy_flag=2"};
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, /*strict=*/true));
ASSERT_EQ(gpu_comp_env.dummy_flag(), 2);
ASSERT_TRUE(flags.empty());
}
TEST(CreateGpuCompEnvFromFlagStringsTest, EmptyFlags) {
std::vector<std::string> flags;
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, /*strict=*/true));
}
TEST(CreateGpuCompEnvFromFlagStringsTest, InvalidFlagName) {
std::vector<std::string> flags = {"--xla_gpu_invalid_flag=2"};
  EXPECT_THAT(CreateGpuCompEnvFromFlagStrings(flags, /*strict=*/true),
StatusIs(tsl::error::INVALID_ARGUMENT));
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, /*strict=*/false));
ASSERT_EQ(flags.size(), 1);
}
TEST(CreateGpuCompEnvFromEnvVarTest, ValidFlags) {
set_xla_flags_env_var("--dummy_flag=4");
TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment gpu_comp_env,
CreateGpuCompEnvFromEnvVar());
ASSERT_EQ(gpu_comp_env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, BothProtoAndEnvVarUnset) {
set_xla_flags_env_var("");
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 1);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoSetButEnvVarUnset) {
set_xla_flags_env_var("");
GpuCompilationEnvironment env;
env.set_dummy_flag(2);
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 2);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoUnsetButEnvVarSet) {
set_xla_flags_env_var("--dummy_flag=4");
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest,
BothProtoAndEnvVarSetButNoConflict) {
set_xla_flags_env_var("--dummy_flag=4");
CompilationEnvironments envs;
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest,
BothProtoAndEnvVarSetWithConflict) {
set_xla_flags_env_var("--dummy_flag=4");
CompilationEnvironments envs;
GpuCompilationEnvironment env;
env.set_dummy_flag(2);
EXPECT_THAT(InitializeMissingFieldsFromXLAFlags(env),
StatusIs(tsl::error::INVALID_ARGUMENT));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
371d52b9-46aa-4f5f-a647-47eba0f32fe7 | cpp | tensorflow/tensorflow | instruction_fusion | third_party/xla/xla/service/gpu/transforms/instruction_fusion.cc | third_party/xla/xla/service/gpu/transforms/instruction_fusion_test.cc | #include "xla/service/gpu/transforms/instruction_fusion.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
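// Divide/sqrt/rsqrt/exp on f32 and f16 map to fast device implementations,
// which is what IsExpensive below relies on.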
bool ElementIsF32OrF16(const Shape& shape) {
PrimitiveType type = shape.element_type();
return type == F32 || type == F16;
}
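// A queue that never yields an instruction; used for computations the pass
// has decided not to touch.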
class EmptyFusionQueue : public FusionQueue {
public:
std::pair<HloInstruction*, std::vector<int64_t>>
DequeueNextInstructionAndOperandsToFuseInOrder() override {
return {nullptr, {}};
}
  void RemoveInstruction(HloInstruction* instruction) override {}
  const std::vector<bool>* FusionConfiguration() override { return nullptr; }
};
}
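// Recomputes the set of fusible computations for this module and clears the
// per-fusion indexing caches before delegating to the base pass.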
absl::StatusOr<bool> GpuInstructionFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
fusion_node_evaluations_.clear();
auto fusible_computations =
GetFusibleComputations(*module, execution_threads);
fusible_computations_ = {fusible_computations.begin(),
fusible_computations.end()};
return InstructionFusion::Run(module, execution_threads);
}
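// Divide, sqrt, rsqrt and exp are cheap on GPU for f32/f16; everything else
// defers to the generic cost model.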
bool GpuInstructionFusion::IsExpensive(
const HloInstruction& instruction) {
switch (instruction.opcode()) {
case HloOpcode::kDivide:
case HloOpcode::kSqrt:
case HloOpcode::kRsqrt:
case HloOpcode::kExp:
if (ElementIsF32OrF16(instruction.shape())) {
return false;
}
break;
default:
break;
}
return InstructionFusion::IsExpensive(instruction);
}
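// Cheap structural checks that can veto a fusion before the costlier budget
// and code-duplication analyses run.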
FusionDecision GpuInstructionFusion::ShouldFuseInexpensiveChecks(
HloInstruction* consumer, int64_t operand_index) {
HloInstruction* producer = consumer->mutable_operand(operand_index);
if (producer->opcode() == HloOpcode::kFusion) {
return FusionDecision::Forbid("the producer is a fusion");
}
if (consumer->IsCustomFusion()) {
return FusionDecision::Forbid("the consumer is a custom fusion");
}
if (is_expensive(*producer) &&
ReusesOperandElements(consumer, operand_index)) {
return FusionDecision::Forbid(
"the producer is expensive, and the consumer reuses inputs");
}
if (IsInputFusibleReduction(*consumer) &&
IsPhysicallyTransposing(*producer)) {
return FusionDecision::Forbid(
"fusing the producer would break read coalescing");
}
RETURN_IF_NOT_FUSIBLE(IsProducerConsumerFusible(*producer, *consumer));
if (CreatesHeavyComputation(*producer, *consumer)) {
return FusionDecision::Forbid(
"the fusion would create a heavy computation");
}
return InstructionFusion::ShouldFuse(consumer, operand_index);
}
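// Full decision: run the cheap checks, verify the fusion fits in the budget,
// and for existing fusion consumers bound the resulting code duplication.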
FusionDecision GpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
int64_t operand_index) {
RETURN_IF_NOT_FUSIBLE(ShouldFuseInexpensiveChecks(consumer, operand_index));
auto producer = consumer->operand(operand_index);
RETURN_IF_NOT_FUSIBLE(
FusionFitsInBudget(*consumer, *producer, device_info_,
                         /*is_consumer_producer_fusion=*/true));
if (consumer->opcode() != HloOpcode::kFusion) {
return FusionDecision::Allow();
}
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
if (fusion_node_evaluations_.at(consumer).CodeDuplicationTooHigh(producer)) {
return FusionDecision::Forbid(
"the fusion would result in an overly large code duplication");
}
return FusionDecision::Allow();
}
HloInstruction::FusionKind GpuInstructionFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
return ChooseFusionKind(*producer, *consumer);
}
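// Performs the fusion while keeping the cached indexing evaluation for
// `fusion_instruction` consistent with the newly fused producer.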
HloInstruction* GpuInstructionFusion::FuseInstruction(
HloInstruction* fusion_instruction, HloInstruction* producer) {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation = fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
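// Computations not marked fusible get an empty queue so the base pass skips
// them entirely.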
std::unique_ptr<FusionQueue> GpuInstructionFusion::GetFusionQueue(
HloComputation* computation) {
if (fusible_computations_.contains(computation)) {
return InstructionFusion::GetFusionQueue(computation);
}
return std::make_unique<EmptyFusionQueue>();
}
}
} | #include "xla/service/gpu/transforms/instruction_fusion.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
class InstructionFusionTest : public HloTestBase {
public:
GpuInstructionFusion duplicating_instruction_fusion_{
      /*may_duplicate=*/true, TestGpuDeviceInfo::RTXA6000DeviceInfo()};
};
TEST_F(InstructionFusionTest, NoFusionIntoCustomFusionConsumer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
c {
p0 = bf16[3000,53]{1,0} parameter(0)
p1 = bf16[22,53]{1,0} parameter(1)
d = bf16[3000,22]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
r = bf16[1,1,3000,22]{3,2,1,0} reshape(d)
ROOT c = bf16[1,1,3000,22]{2,1,3,0} copy(r)
}
ENTRY e {
p1 = bf16[3000,53]{1,0} parameter(1)
p0 = bf16[22,53]{1,0} parameter(0)
cp0 = bf16[22,53]{1,0} convert(p0)
ROOT f = bf16[1,1,3000,22]{2,1,3,0} fusion(p1, cp0), kind=kCustom, calls=c
})"));
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest,
CostlyProducerAndOperandElementReusingConsumerNotFused) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f)));
HloInstruction* log1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kLog, const0));
HloInstruction* broadcast2 =
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {1}), log1, {}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(broadcast2, computation->root_instruction());
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
EXPECT_EQ(broadcast2, computation->root_instruction());
}
TEST_F(InstructionFusionTest,
NonCostlyProducerAndOperandElementReusingConsumerFused) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
HloInstruction* negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kNegate, const0));
HloInstruction* broadcast2 =
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(S32, {1}), negate1, {}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(broadcast2, computation->root_instruction());
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}
TEST_F(InstructionFusionTest,
CostlyProducerAndNonOperandElementReusingConsumerFused_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f)));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kExp, const0));
HloInstruction* reshape2 = builder.AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeShape(F32, {}), exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(reshape2, computation->root_instruction());
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}
TEST_F(InstructionFusionTest,
CostlyProducerAndNonOperandElementReusingConsumerFused_Transpose) {
HloComputation::Builder builder(TestName());
Shape operand_shape = ShapeUtil::MakeShape(F32, {64, 32});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, operand_shape, "param0"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(operand_shape, HloOpcode::kExp, param));
HloInstruction* transpose2 =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {32, 64}), exp1, {1, 0}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(transpose2, computation->root_instruction());
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}
TEST_F(InstructionFusionTest, PotentialBitcastReshapeOfDotFused) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1}), "0"));
auto dot1 = builder.AddInstruction(
CreateCanonicalDot(ShapeUtil::MakeShape(F32, {1, 1}), param0, param0));
auto reshape2 = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1, 1, 1}), dot1));
auto log = builder.AddInstruction(HloInstruction::CreateUnary(
reshape2->shape(), xla::HloOpcode::kLog, reshape2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(log, computation->root_instruction());
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, PotentialBitcastTransposeOfDotUnfused) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {1, 1}), "0"));
auto dot1 = builder.AddInstruction(
CreateCanonicalDot(ShapeUtil::MakeShape(S32, {1, 1}), param0, param0));
auto transpose2 = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {1, 1}), dot1, {0, 1}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(transpose2, computation->root_instruction());
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, BroadcastIntoReduce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY BroadcastIntoReduce {
constant = f32[] constant(1)
broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={}
constant.1 = f32[] constant(0)
ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
to_apply=add
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(
root->fused_expression_root(),
GmockMatch(m::Reduce(m::Broadcast(m::Constant()), m::Constant())));
}
TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
constant.1 = f32[] constant(0)
ROOT reduce = f32[16] reduce(copy, constant.1), dimensions={0,1,2}, to_apply=add
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduceFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_reduce {
p0.1 = f32[16,16,16,16]{0,1,2,3} parameter(0)
mul = f32[16,16,16,16]{0,1,2,3} multiply(p0.1, p0.1)
c0.1 = f32[] constant(0)
ROOT root = f32[] reduce(mul, c0.1), dimensions={0,1,2,3}, to_apply=add
}
ENTRY entry {
p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
fusion = f32[] fusion(copy), kind=kInput, calls=fused_reduce
ROOT root = (f32[]) tuple(fusion)
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, DoNotRepeatLargeReduceWindow) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
p0 = s32[512,512,2] parameter(0)
p1 = f32[1,1,512,512] parameter(1)
constant_1 = f32[] constant(1)
reduce-window.1 = reduce-window(p1, constant_1),
window={size=1x1x9x9}, to_apply=add
ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3},
collapsed_slice_dims={}, start_index_map={1,2},
index_vector_dim=2, slice_sizes={1,1,1,1}
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, FuseLayoutChangingOpWithElementwise) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry {
p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Add(m::Copy(), m::Copy())));
}
TEST_F(InstructionFusionTest, BitcastIntoAdd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY BroadcastIntoAdd {
p0 = f32[4,1,1]{2,1,0} parameter(0)
p1 = f32[4,1]{1,0} parameter(1)
bitcast = f32[4,1]{1,0} bitcast(p0)
ROOT add = f32[4,1] add(bitcast, p1)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Add(m::Bitcast(m::Parameter()), m::Parameter())));
}
TEST_F(InstructionFusionTest, AddIntoBitcast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY BroadcastIntoAdd {
p0 = f32[4,1]{1,0} parameter(0)
p1 = f32[4,1]{1,0} parameter(1)
add = f32[4,1] add(p0, p1)
ROOT bitcast = f32[4,1,1] bitcast(add)
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, ConvertIntoBitcastBothConsumedByTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
convert = bf16[2048,16000]{1,0} convert(param_0)
bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert)
ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert)
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, DontFuseGTE) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY DontFuseGTE {
p0 = (f32[10], f32[10]) parameter(0)
gte0 = f32[10] get-tuple-element(p0), index=0
gte1 = f32[10] get-tuple-element(p0), index=1
ROOT add = f32[10] add(gte0, gte1)
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, FloatingPointDivIsCheap) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY TestComputation {
zero = f32[] constant(0)
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
recip = f32[100] divide(p1, p0)
sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
sum2 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
ROOT root = (f32[], f32[]) tuple(sum1, sum2)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion())))
<< module->ToString();
}
TEST_F(InstructionFusionTest, IntegerDivIsNotCheap) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
Add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY TestComputation {
zero = s32[] constant(0)
p0 = s32[100] parameter(0)
p1 = s32[100] parameter(1)
recip = s32[100] divide(p1, p0)
sum1 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add
sum2 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add
ROOT mul = (s32[], s32[]) tuple(sum1, sum2)
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value())
<< module->ToString();
}
TEST_F(InstructionFusionTest, DotOutputFusionImpossible) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY NoOutputFusion {
alpha = f32[] constant(3)
broadcast = f32[4,4]{1,0} broadcast(alpha), dimensions={}
p0 = f32[4,3]{1,0} parameter(0)
p1 = f32[3,4]{1,0} parameter(1)
dot = f32[4,4]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
d = f32[4,4]{1,0} multiply(dot, dot)
ROOT mul = f32[4,4] multiply(d, broadcast)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop);
EXPECT_THAT(
root->fused_expression_root(),
GmockMatch(m::Multiply(m::Multiply(m::Parameter(), m::Parameter()),
m::Broadcast(m::Constant()))));
}
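// Counts instructions with opcode `op` across all computations in `module`.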
static int Count(const HloModule& module, HloOpcode op) {
int count = 0;
for (const auto* computation : module.computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == op) {
++count;
}
}
}
return count;
}
TEST_F(InstructionFusionTest, MultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY OutputFusion {
p0 = f32[4,3]{1,0} parameter(0)
p1 = f32[4,3]{1,0} parameter(1)
p2 = f32[4,3]{1,0} parameter(2)
sub = f32[4,3]{1,0} subtract(p0, p2)
add = f32[4,3]{1,0} add(sub, p1)
ROOT tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}) tuple(sub, add)
})")
.value();
ASSERT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
TEST_F(InstructionFusionTest, FuseScalarConstant) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY FuseScalarConstant {
p0 = f32[] parameter(0)
c0 = f32[] constant(1)
add1 = f32[] add(p0, c0)
b0 = f32[2]{0} broadcast(add1), dimensions={}
c1 = f32[2]{0} constant({1, 2})
ROOT add2 = f32[2]{0} add(b0, c1)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(
root->fused_expression_root(),
GmockMatch(m::Add(m::Broadcast(m::Add(m::Parameter(), m::Constant())),
m::Parameter())));
}
TEST_F(InstructionFusionTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = 200;
ASSERT_GT(kNumParams, MaxOperandsAndOutputsPerFusion());
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
auto param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
auto sum = param0;
for (int64_t i = 1; i < kNumParams; ++i) {
auto param =
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p"));
sum = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, param));
}
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(b.Build());
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
for (const HloInstruction* instr : computation->instructions()) {
EXPECT_LE(instr->operand_count(), MaxOperandsAndOutputsPerFusion())
<< instr->ToString();
}
}
TEST_F(InstructionFusionTest, FuseIntoScatter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
p0 = s32[3,3] parameter(0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
scatter = s32[3,3] scatter(p0, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT add = s32[3,3] add(scatter, scatter)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}
TEST_F(InstructionFusionTest, DontFuseIntoFirstOperandOfScatter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT add = s32[3,3] add(scatter, scatter)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}
TEST_F(InstructionFusionTest, ScatterOpShouldNotFuseWithSharedOperand) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY Test {
parameter.0 = f32[8,8] parameter(0)
parameter.1 = s32[7] parameter(1)
indices = s32[7] add(parameter.1, parameter.1)
slice = f32[7,8] slice(parameter.0), slice={[0:7],[0:8]}
ROOT scatter = f32[8,8] scatter(parameter.0, indices, slice),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, GmockMatch(m::Fusion(m::Parameter(), m::Slice(), m::Parameter())));
}
TEST_F(InstructionFusionTest, NonscalarConstantsNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY BroadcastIntoReduce {
constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15})
broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0}
constant.1 = f32[] constant(0)
ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
to_apply=add
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(
root->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Reduce(m::Broadcast(m::Parameter()), m::Constant())));
}
TEST_F(InstructionFusionTest, FuseReverse) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY Reverse {
p0 = f32[50,96,1024]{2,1,0} parameter(0)
add = f32[50,96,1024]{2,1,0} add(p0, p0)
ROOT reverse = f32[50,96,1024] reverse(add), dimensions={0}
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Reverse(m::Add(m::Parameter(), m::Parameter()))));
}
TEST_F(InstructionFusionTest, GpuIsExpensiveF32) {
auto m = CreateNewVerifiedModule();
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param0"));
HloInstruction* one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kDivide, param0, one));
HloInstruction* rem = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kRemainder, param0, one));
HloInstruction* sqrt = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kSqrt, param0));
HloInstruction* rsqrt = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kRsqrt, param0));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, param0));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*sqrt));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rsqrt));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*exp));
}
TEST_F(InstructionFusionTest, GpuIsExpensiveF64) {
auto m = CreateNewVerifiedModule();
Shape r0f64 = ShapeUtil::MakeShape(F64, {});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f64, "param0"));
HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(1.0)));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r0f64, HloOpcode::kDivide, param0, one));
HloInstruction* rem = builder.AddInstruction(
HloInstruction::CreateBinary(r0f64, HloOpcode::kRemainder, param0, one));
HloInstruction* sqrt = builder.AddInstruction(
HloInstruction::CreateUnary(r0f64, HloOpcode::kSqrt, param0));
HloInstruction* rsqrt = builder.AddInstruction(
HloInstruction::CreateUnary(r0f64, HloOpcode::kRsqrt, param0));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f64, HloOpcode::kExp, param0));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*div));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*sqrt));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rsqrt));
EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*exp));
}
TEST_F(InstructionFusionTest, GpuIsExpensiveS32) {
auto m = CreateNewVerifiedModule();
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0s32, "param0"));
HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kDivide, param0, one));
HloInstruction* rem = builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kRemainder, param0, one));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem));
}
TEST_F(InstructionFusionTest, GpuIsExpensiveBroadcastS32) {
auto m = CreateNewVerifiedModule();
Shape r1s32 = ShapeUtil::MakeShape(S32, {10});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r1s32, "param0"));
HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* one_broad =
builder.AddInstruction(HloInstruction::CreateBroadcast(r1s32, one, {}));
HloInstruction* div = builder.AddInstruction(HloInstruction::CreateBinary(
r1s32, HloOpcode::kDivide, param0, one_broad));
HloInstruction* rem = builder.AddInstruction(HloInstruction::CreateBinary(
r1s32, HloOpcode::kRemainder, param0, one_broad));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem));
}
TEST_F(InstructionFusionTest, FloatingPointExpIsCheap) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY TestComputation {
zero = f32[] constant(0)
p0 = f32[100] parameter(0)
recip = f32[100] exponential(p0)
sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
sum2 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
ROOT root = (f32[], f32[]) tuple(sum1, sum2)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion())))
<< module->ToString();
}
TEST_F(InstructionFusionTest, SmallReducedDimensionIsNotLoweredToLoop) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseSmallReduction {
p0 = s32[1048576,4] parameter(0)
p1 = s32[1048576,4] parameter(1)
sum = s32[1048576,4] add(p0, p1)
init = s32[] constant(0)
ROOT reduce = s32[1048576] reduce(sum, init), dimensions={1}, to_apply=add
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput);
}
TEST_F(InstructionFusionTest, IotaIntoVariadicReduction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=f
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax
})")
.value();
  EXPECT_TRUE(GpuInstructionFusion(/*may_duplicate=*/false,
TestGpuDeviceInfo::RTXA6000DeviceInfo())
.Run(module.get())
.value());
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())));
EXPECT_THAT(
module->entry_computation()->root_instruction()->fused_expression_root(),
GmockMatch(
m::Reduce(m::Parameter(), m::Iota(), m::Constant(), m::Constant())));
}
TEST_F(InstructionFusionTest, InputReductionFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add.clone.13 {
x.27 = f32[] parameter(0)
y.27 = f32[] parameter(1)
ROOT add.1036 = f32[] add(x.27, y.27)
}
add.clone.14 {
x.28 = f32[] parameter(0)
y.28 = f32[] parameter(1)
ROOT add.1037 = f32[] add(x.28, y.28)
}
add {
x = bf16[] parameter(0)
convert.448 = f32[] convert(x)
y = bf16[] parameter(1)
convert.449 = f32[] convert(y)
add.597 = f32[] add(convert.448, convert.449)
ROOT convert.450 = bf16[] convert(add.597)
}
ENTRY FuseSmallReduction {
param_2.7 = bf16[8,16,64,2048]{3,2,1,0} parameter(2)
convert.1395 = f32[8,16,64,2048]{3,2,1,0} convert(param_2.7)
param_0.85 = bf16[8,16,64,2048]{3,2,1,0} parameter(0)
convert.1393 = f32[8,16,64,2048]{3,2,1,0} convert(param_0.85)
multiply.1652 = f32[8,16,64,2048]{3,2,1,0} multiply(convert.1395, convert.1393)
convert.1392 = bf16[8,16,64,2048]{3,2,1,0} convert(multiply.1652)
bitcast.15934 = bf16[128,64,2048]{2,1,0} bitcast(convert.1392)
convert.1391 = f32[128,64,2048]{2,1,0} convert(bitcast.15934)
param_1.15 = bf16[] parameter(1)
convert.1394 = f32[] convert(param_1.15)
reduce.462 = f32[128,64]{1,0} reduce(convert.1391, convert.1394), dimensions={2}, to_apply=add.clone.13
reduce.121 = f32[64]{0} reduce(reduce.462, convert.1394), dimensions={0}, to_apply=add.clone.14
ROOT convert.890 = bf16[64]{0} convert(reduce.121)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* fused_convert_fusion =
module->entry_computation()->root_instruction();
ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion()));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(fused_convert_fusion->fusion_kind(),
HloInstruction::FusionKind::kInput);
}
TEST_F(InstructionFusionTest, DotStrengthReductionFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
scalar_add_computation {
scalar_rhs = f32[] parameter(1)
scalar_lhs = f32[] parameter(0)
ROOT add.1 = f32[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_1.3 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} parameter(1)
param_0.6 = f16[16,64,96,1,2,16]{5,4,3,2,1,0} parameter(0)
bitcast.26 = f16[16,64,96,2,16]{4,3,2,1,0} bitcast(param_0.6)
broadcast.4 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} broadcast(bitcast.26), dimensions={0,1,2,4,5}
multiply.4 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} multiply(broadcast.4, param_1.3)
convert.8 = f32[16,64,96,6,2,16]{5,4,3,2,1,0} convert(multiply.4)
constant_2 = f32[] constant(0)
reduce.3 = f32[16,64,96,6,2]{3,4,2,1,0} reduce(convert.8, constant_2), dimensions={5}, to_apply=scalar_add_computation
bitcast.25 = f32[16,64,96,2,6]{4,3,2,1,0} bitcast(reduce.3)
convert.7 = f16[16,64,96,2,6]{4,3,2,1,0} convert(bitcast.25)
ROOT bitcast.24 = f16[16,64,96,2,1,6]{5,4,3,2,1,0} bitcast(convert.7)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
const HloInstruction* fused_convert_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion()));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(fused_convert_fusion->fusion_kind(),
HloInstruction::FusionKind::kInput);
EXPECT_EQ(Count(*module, HloOpcode::kFusion), 1);
}
TEST_F(InstructionFusionTest, ReductionFusionOtherUnaryElementwiseOpsAreFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
scalar_add_computation {
scalar_rhs = f32[] parameter(1)
scalar_lhs = f32[] parameter(0)
ROOT add.1 = f32[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = f16[64,96,6,16]{3,2,1,0} parameter(0)
constant_2 = f32[] constant(0)
reduce.3 = f32[64,6,16]{2,1,0} reduce(param_0, constant_2), dimensions={1}, to_apply=scalar_add_computation
negate = f32[64,6,16]{2,1,0} negate(reduce.3)
ROOT sine = f16[64,6,16]{2,1,0} sine(negate)
})")
.value();
EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
HloInstruction* fused_convert_fusion =
module->entry_computation()->root_instruction();
ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion()));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(fused_convert_fusion->fusion_kind(),
HloInstruction::FusionKind::kInput);
EXPECT_EQ(Count(*module, HloOpcode::kFusion), 1);
}
TEST_F(InstructionFusionTest, DoNotFuseInsideReducer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
scalar_add_computation {
scalar_rhs = f32[] parameter(1)
scalar_lhs = f32[] parameter(0)
add.1 = f32[] add(scalar_lhs, scalar_rhs)
ROOT add.2 = f32[] add(add.1, scalar_rhs)
}
ENTRY main {
param_0 = f16[64,96] parameter(0)
constant_2 = f32[] constant(0)
ROOT reduce = f32[64] reduce(param_0, constant_2), dimensions={1}, to_apply=scalar_add_computation
})")
.value();
EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/instruction_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/instruction_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
548583df-a9ce-42fe-a3f2-1f92da313ff7 | cpp | tensorflow/tensorflow | conditional_simplifier | third_party/xla/xla/service/conditional_simplifier.cc | third_party/xla/xla/service/conditional_simplifier_test.cc | #include "xla/service/conditional_simplifier.h"
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
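// Returns true if `computation` does no real work (contains only parameter,
// get-tuple-element, and tuple ops) and its root shape includes at least one
// array subshape.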
bool ComputationIsEmptyWithArrayRoot(const HloComputation* computation) {
bool empty_operations = absl::c_all_of(
computation->MakeInstructionPostOrder(),
HloPredicateIsOp<HloOpcode::kTuple, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>);
bool contains_array = false;
ShapeUtil::ForEachSubshape(computation->root_instruction()->shape(),
[&](const Shape& shape, const ShapeIndex& index) {
if (shape.IsArray()) {
contains_array = true;
}
});
return empty_operations && contains_array;
}
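// Narrows the tuple parameter of a conditional branch computation to only the
// elements that are actually read via get-tuple-element: clones the
// computation with the smaller parameter shape and rewires every calling
// conditional's branch operand to a tuple of just the kept elements. Returns
// true if anything changed.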
absl::StatusOr<bool> TryRemoveUnusedConditionalOperands(
HloComputation* computation,
const absl::flat_hash_set<HloInstruction*>& calling_conditionals) {
HloInstruction* param = computation->parameter_instruction(0);
if (param == computation->root_instruction()) {
return false;
}
if (!param->shape().IsTuple()) {
return false;
}
std::set<int64_t> tuple_indices_to_keep;
for (HloInstruction* user : param->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
tuple_indices_to_keep.insert(user->tuple_index());
}
int64_t old_tuple_element_count =
ShapeUtil::TupleElementCount(param->shape());
if (tuple_indices_to_keep.size() == old_tuple_element_count) {
return false;
}
std::vector<const Shape*> new_tuple_shapes;
new_tuple_shapes.reserve(tuple_indices_to_keep.size());
std::vector<int64_t> map(old_tuple_element_count, -1);
for (int64_t i : tuple_indices_to_keep) {
map[i] = new_tuple_shapes.size();
new_tuple_shapes.push_back(¶m->shape().tuple_shapes(i));
}
Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
HloComputation* new_computation =
computation->parent()->AddEmbeddedComputation(computation->Clone());
param = new_computation->parameter_instruction(0);
*param->mutable_shape() = tuple_shape;
for (HloInstruction* user : param->users()) {
user->set_tuple_index(map[user->tuple_index()]);
}
for (HloInstruction* conditional : calling_conditionals) {
if (conditional->has_sharding()) {
continue;
}
for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
if (conditional->branch_computation(branch) != computation) {
continue;
}
conditional->set_branch_computation(branch, new_computation);
const Shape& old_shape = conditional->operand(branch + 1)->shape();
std::vector<HloInstruction*> new_tuple_operands;
new_tuple_operands.reserve(tuple_indices_to_keep.size());
for (int64_t i : tuple_indices_to_keep) {
new_tuple_operands.push_back(conditional->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
old_shape.tuple_shapes(i),
conditional->mutable_operand(branch + 1), i)));
}
HloInstruction* new_tuple = conditional->parent()->AddInstruction(
HloInstruction::CreateTuple(new_tuple_operands));
TF_RETURN_IF_ERROR(
conditional->ReplaceOperandWithDifferentShape(branch + 1, new_tuple));
CHECK(ShapeUtil::Compatible(conditional->operand(branch + 1)->shape(),
conditional->branch_computation(branch)
->parameter_instruction(0)
->shape()));
CHECK(ShapeUtil::Compatible(
conditional->shape(),
conditional->branch_computation(branch)->root_instruction()->shape()))
<< conditional->branch_computation(branch)->ToString();
}
}
return true;
}
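// If the conditional has no users and is not the entry root, rewrites every
// branch to return an empty tuple and shrinks the conditional's shape to ().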
bool ReplaceRootWithEmptyTupleIfNoUsers(HloInstruction* conditional_op) {
const Shape empty_tuple = ShapeUtil::MakeTupleShape({});
if (conditional_op->user_count() == 0 &&
conditional_op != conditional_op->parent()->root_instruction() &&
!ShapeUtil::Compatible(empty_tuple, conditional_op->shape())) {
for (int64_t branch_id = 0; branch_id < conditional_op->branch_count();
++branch_id) {
auto branch_computation =
conditional_op->GetModule()->AddEmbeddedComputation(
conditional_op->branch_computation(branch_id)->Clone());
conditional_op->set_branch_computation(branch_id, branch_computation);
auto new_empty_root =
branch_computation->AddInstruction(HloInstruction::CreateTuple({}));
branch_computation->set_root_instruction(new_empty_root,
/*accept_different_shape=*/true);
}
*conditional_op->mutable_shape() = empty_tuple;
return true;
}
return false;
}
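// Removes result-tuple elements that no get-tuple-element user of the
// conditional reads: clones each branch with a narrowed root tuple and remaps
// the surviving users' tuple indices.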
bool RemoveUnusedTupleElements(HloInstruction* conditional_op) {
if (conditional_op->user_count() == 0 ||
conditional_op == conditional_op->parent()->root_instruction() ||
!conditional_op->shape().IsTuple()) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to non-tuple result:\n"
<< conditional_op->ToShortString();
return false;
}
const int old_tuple_shapes_size = conditional_op->shape().tuple_shapes_size();
std::vector<bool> used_indices(old_tuple_shapes_size, false);
for (const HloInstruction* user : conditional_op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to non-GTE user:\n"
<< user->ToShortString();
return false;
}
used_indices[user->tuple_index()] = true;
}
const int new_tuple_shapes_size =
std::count(used_indices.begin(), used_indices.end(), true);
if (new_tuple_shapes_size == old_tuple_shapes_size) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to every index is in use.";
return false;
}
absl::flat_hash_map<int, int> new_to_old_mapping, old_to_new_mapping;
auto old_iter = used_indices.begin();
for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
old_iter = std::find(old_iter, used_indices.end(), true);
const int old_index = std::distance(used_indices.begin(), old_iter);
new_to_old_mapping[new_index] = old_index;
old_to_new_mapping[old_index] = new_index;
++old_iter;
}
const Shape old_shape = conditional_op->shape();
std::vector<const Shape*> new_tuple_shapes;
new_tuple_shapes.reserve(new_tuple_shapes_size);
for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
new_tuple_shapes.push_back(
&old_shape.tuple_shapes(new_to_old_mapping[new_index]));
}
const Shape new_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
for (HloComputation* branch : conditional_op->branch_computations()) {
const HloInstruction* root = branch->root_instruction();
if (!root->shape().IsTuple() ||
!ShapeUtil::Compatible(branch->root_instruction()->shape(),
old_shape)) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to some branch "
<< branch->name() << " has in-compatible root shape, expect "
<< old_shape.ToString() << ", but got "
<< root->shape().ToString() << "\n"
<< conditional_op->ToString();
return false;
}
}
for (int branch_id = 0; branch_id < conditional_op->branch_count();
++branch_id) {
HloComputation* old_branch = conditional_op->branch_computation(branch_id);
HloComputation* cloned_branch =
conditional_op->GetModule()->AddEmbeddedComputation(
old_branch->Clone());
conditional_op->set_branch_computation(branch_id, cloned_branch);
HloInstruction* old_root = cloned_branch->root_instruction();
std::vector<HloInstruction*> new_tuple_root_operands;
for (int old_index = 0; old_index < old_tuple_shapes_size; ++old_index) {
if (used_indices[old_index]) {
new_tuple_root_operands.push_back(
cloned_branch->AddInstruction(HloInstruction::CreateGetTupleElement(
old_shape.tuple_shapes(old_index), old_root, old_index)));
}
}
HloInstruction* new_tuple_root = cloned_branch->AddInstruction(
HloInstruction::CreateTuple(new_tuple_root_operands));
cloned_branch->set_root_instruction(new_tuple_root,
/*accept_different_shape=*/true);
}
*conditional_op->mutable_shape() = new_shape;
for (HloInstruction* user : conditional_op->users()) {
const int old_index = user->tuple_index();
const int new_index = old_to_new_mapping[old_index];
user->set_tuple_index(new_index);
}
return true;
}
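// If two indices of the result tuple are fed by identical operand sequences
// across all branches, redirects get-tuple-element users of the duplicate
// index to its first occurrence so the duplicate becomes dead.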
bool MergeDuplicateTupleElements(HloInstruction* conditional) {
if (conditional->user_count() == 0 ||
conditional == conditional->parent()->root_instruction() ||
!conditional->shape().IsTuple()) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not tuple shape nor root "
"instruction:\n"
<< conditional->ToShortString();
return false;
}
for (const HloInstruction* user : conditional->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not all users are "
"kGetTupleElement:\n"
<< conditional->ToShortString();
return false;
}
}
for (const HloComputation* branch : conditional->branch_computations()) {
if (branch->root_instruction()->opcode() != HloOpcode::kTuple) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not all branch roots "
"are kTuple:\n"
<< conditional->ToShortString();
return false;
}
}
auto vectorize_branches_root_tuple_ith_operand = [conditional](int64_t i) {
std::vector<const HloInstruction*> operands;
absl::c_transform(conditional->branch_computations(),
std::back_inserter(operands),
[i](const HloComputation* branch) {
return branch->root_instruction()->operand(i);
});
return operands;
};
auto replace_root_user_gte_jth_with_gte_ith = [conditional](int64_t i,
int64_t j) {
bool changed = false;
for (HloInstruction* user : conditional->users()) {
if (user->tuple_index() == j) {
user->set_tuple_index(i);
changed = true;
}
}
return changed;
};
bool changed = false;
absl::flat_hash_map<std::vector<const HloInstruction*>, int64_t>
index_collision_table;
for (int i = 0; i < conditional->shape().tuple_shapes_size(); ++i) {
const std::vector<const HloInstruction*> ith_operands_vector =
vectorize_branches_root_tuple_ith_operand(i);
const auto emplace_res =
index_collision_table.emplace(ith_operands_vector, i);
if (!emplace_res.second) {
changed |=
replace_root_user_gte_jth_with_gte_ith(emplace_res.first->second, i);
}
}
return changed;
}
}
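// Replaces a conditional with an inlined call when the branch taken is known
// (single branch or constant predicate). Otherwise, for two-branch PRED
// conditionals whose branches contain only cheap instructions, inlines both
// branches and selects between their results.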
absl::StatusOr<bool> ConditionalSimplifier::TryRemoveConditional(
HloInstruction* conditional) {
CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
if (!conditional->parent()->IsSafelyRemovable(conditional) ||
conditional->HasSideEffect()) {
VLOG(2) << "Not attempting to remove conditional as it is not removable or "
"has side effect: "
<< conditional->ToShortString();
return false;
}
auto computation = conditional->parent();
auto create_call = [&](int64_t branch) {
auto call = computation->AddInstruction(HloInstruction::CreateCall(
conditional->shape(), {conditional->mutable_operand(1 + branch)},
conditional->branch_computation(branch)));
conditional->SetupDerivedInstruction(call);
return call;
};
if (conditional->branch_count() == 1) {
HloInstruction* call_op = create_call(0);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
return true;
}
if (conditional->operand(0)->opcode() == HloOpcode::kConstant) {
int branch_index = 0;
if (conditional->operand(0)->shape().element_type() == PRED) {
branch_index = conditional->operand(0)->literal().Get<bool>({}) ? 0 : 1;
} else {
branch_index = conditional->operand(0)->literal().Get<int32_t>({});
if (branch_index < 0 || branch_index >= conditional->branch_count()) {
branch_index = conditional->branch_count() - 1;
}
}
HloInstruction* call_op = create_call(branch_index);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
return true;
}
auto instruction_is_expensive = [](const HloInstruction* hlo) {
switch (hlo->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReduce:
case HloOpcode::kReshape:
case HloOpcode::kPad:
case HloOpcode::kParameter:
case HloOpcode::kSlice:
case HloOpcode::kTuple:
return false;
default:
return !hlo->IsElementwise();
}
};
if (conditional->branch_count() != 2 ||
conditional->operand(0)->shape().element_type() != PRED ||
absl::c_any_of(conditional->branch_computation(0)->instructions(),
instruction_is_expensive) ||
absl::c_any_of(conditional->branch_computation(1)->instructions(),
instruction_is_expensive)) {
VLOG(2)
<< "Not attempting to remove conditional as it is not a two-branch "
"conditional with a PRED predicate, or a branch contains expensive "
"instructions: "
<< conditional->ToShortString();
return false;
}
bool branch_empty =
ComputationIsEmptyWithArrayRoot(conditional->branch_computation(0)) ||
ComputationIsEmptyWithArrayRoot(conditional->branch_computation(1));
if (branch_empty) {
return false;
}
HloInstruction* true_call_op = create_call(0);
HloInstruction* false_call_op = create_call(1);
auto condition_broadcast = [&](const Shape& shape) {
if (ShapeUtil::IsScalar(shape)) {
return conditional->mutable_operand(0);
}
Shape new_shape = ShapeUtil::ChangeElementType(shape, PRED);
UpdateLayout(&new_shape);
return computation->AddInstruction(HloInstruction::CreateBroadcast(
new_shape, conditional->mutable_operand(0), {}));
};
auto gte = [&](HloInstruction* hlo, int64_t i) {
return computation->AddInstruction(HloInstruction::CreateGetTupleElement(
hlo->shape().tuple_shapes(i), hlo, i));
};
std::function<HloInstruction*(HloInstruction*, HloInstruction*)> select =
[&](HloInstruction* t, HloInstruction* f) {
if (f->shape().IsToken()) {
return computation->AddInstruction(
HloInstruction::CreateAfterAll({t, f}));
}
if (f->shape().IsArray()) {
return computation->AddInstruction(HloInstruction::CreateTernary(
f->shape(), HloOpcode::kSelect, condition_broadcast(f->shape()),
t, f));
}
std::vector<HloInstruction*> selects;
const int64_t tuple_element_count =
ShapeUtil::TupleElementCount(f->shape());
selects.reserve(tuple_element_count);
for (int64_t i = 0; i < tuple_element_count; ++i) {
selects.push_back(select(gte(t, i), gte(f, i)));
}
return computation->AddInstruction(
HloInstruction::CreateTuple(selects));
};
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
conditional, select(true_call_op, false_call_op)));
TF_RETURN_IF_ERROR(CallInliner::Inline(false_call_op).status());
TF_RETURN_IF_ERROR(CallInliner::Inline(true_call_op).status());
return true;
}
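// Returns true if `computation`, or any computation it transitively calls,
// contains a channel instruction (e.g. send/recv).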
static bool ComputationCallsChannelInstructions(
const HloComputation& computation) {
std::vector<const HloComputation*> worklist = {&computation};
while (!worklist.empty()) {
const HloComputation* work = worklist.back();
worklist.pop_back();
for (const HloInstruction* instruction : work->instructions()) {
if (DynCast<HloChannelInstruction>(instruction) != nullptr) {
return true;
}
worklist.insert(worklist.end(),
instruction->called_computations().begin(),
instruction->called_computations().end());
}
}
return false;
}
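// Returns true if any computation called by `instruction` transitively
// contains a channel instruction.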
static bool InstructionCallsChannelInstructions(
const HloInstruction& instruction) {
for (const HloComputation* called_computation :
instruction.called_computations()) {
if (ComputationCallsChannelInstructions(*called_computation)) {
return true;
}
}
return false;
}
absl::StatusOr<bool> ConditionalSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
3, "ConditionalSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
std::vector<HloInstruction*> conditional_ops;
for (auto* comp : module->computations(execution_threads)) {
for (auto* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kConditional) {
if (InstructionCallsChannelInstructions(*instr)) {
continue;
}
if (instr->has_sharding()) {
continue;
}
conditional_ops.push_back(instr);
}
}
}
absl::flat_hash_set<HloInstruction*> removed_conditionals;
for (HloInstruction* conditional_op : conditional_ops) {
changed |= MergeDuplicateTupleElements(conditional_op);
changed |= RemoveUnusedTupleElements(conditional_op);
changed |= ReplaceRootWithEmptyTupleIfNoUsers(conditional_op);
TF_ASSIGN_OR_RETURN(bool result, TryRemoveConditional(conditional_op));
if (result) {
removed_conditionals.insert(conditional_op);
changed = true;
}
}
absl::flat_hash_map<HloComputation*, absl::flat_hash_set<HloInstruction*>>
calling_conditionals;
std::vector<HloComputation*> calling_computations_vector;
for (HloInstruction* conditional : conditional_ops) {
if (removed_conditionals.contains(conditional)) {
continue;
}
for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
auto* branch_comp = conditional->branch_computation(branch);
if (!calling_conditionals.contains(branch_comp)) {
calling_computations_vector.push_back(branch_comp);
}
calling_conditionals[branch_comp].insert(conditional);
}
}
for (auto* comp : calling_computations_vector) {
auto entry = calling_conditionals.find(comp);
CHECK(entry != calling_conditionals.end());
TF_ASSIGN_OR_RETURN(bool result, TryRemoveUnusedConditionalOperands(
entry->first, entry->second));
changed |= result;
}
XLA_VLOG_LINES(3,
"ConditionalSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/conditional_simplifier.h"
#include <string>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ConditionalSimplifierTest : public HloTestBase {
public:
HloComputation* MakeConditional(HloModule* module, bool is_constant = true);
};
HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module,
bool is_constant) {
HloComputation::Builder builder(TestName());
HloComputation* true_computation;
{
HloComputation::Builder true_computation_builder(TestName() +
".true_computation");
auto param =
true_computation_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param"));
auto one = true_computation_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
true_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, one));
true_computation =
module->AddEmbeddedComputation(true_computation_builder.Build());
}
HloComputation* false_computation;
{
HloComputation::Builder false_computation_builder(TestName() +
".false_computation");
auto param = false_computation_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(S32, {}),
"param"));
auto forty_two = false_computation_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42)));
false_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, forty_two));
false_computation =
module->AddEmbeddedComputation(false_computation_builder.Build());
}
auto false_instrn = builder.AddInstruction(
is_constant
? HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))
: HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(PRED, {}),
"cond"));
auto false_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "false_param"));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
builder.AddInstruction(HloInstruction::CreateConditional(
ShapeUtil::MakeShape(S32, {}), false_instrn, one, true_computation,
false_param, false_computation));
return module->AddEntryComputation(builder.Build());
}
TEST_F(ConditionalSimplifierTest, ConditionalGetsInlined) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Parameter(), op::Constant()));
}
TEST_F(ConditionalSimplifierTest, BranchGetsInlined) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get(), false);
ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
EXPECT_THAT(
computation->root_instruction(),
op::Select(op::Parameter(1), op::Add(op::Constant(), op::Constant()),
op::Add(op::Parameter(0), op::Constant())));
}
TEST_F(ConditionalSimplifierTest, ConditionalWithControlDependency) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* true_op = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(
true_op->AddControlDependencyTo(computation->root_instruction()));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsSend) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* send = true_computation->AddInstruction(HloInstruction::CreateSend(
true_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
token, 0));
true_computation->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsRecv) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* recv = true_computation->AddInstruction(HloInstruction::CreateRecv(
ShapeUtil::MakeShape(F32, {1}), token, 0));
true_computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsNonRemovableInstruction) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* false_computation = conditional->false_computation();
auto token = false_computation->AddInstruction(HloInstruction::CreateToken());
false_computation->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, TrivialOperandsRemoved) {
absl::string_view hlo_string =
R"(
HloModule UnusedTupleOperands
on_false {
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
rhs = f32[40,40] get-tuple-element(t), index=1
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=2
rhs = f32[40,40] get-tuple-element(t), index=3
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT result = (f32[20,40]) tuple(dot)
}
ENTRY main {
c0_0 = f32[20,40] parameter(0)
c0_1 = f32[40,40] parameter(1)
c1_0 = f32[20,40] parameter(2)
c1_1 = f32[40,40] parameter(3)
p = pred[] parameter(4)
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) tuple(c0_0, c0_1, c1_0, c1_1)
call = (f32[20,40]) call(t), to_apply=on_true
ROOT result = (f32[20,40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
std::unique_ptr<HloModule> module = std::move(status).value();
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(module.get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
TF_ASSERT_OK(v.Run(module.get()).status());
HloInstruction* conditional = module->entry_computation()->root_instruction();
EXPECT_TRUE(conditional != nullptr);
EXPECT_EQ(conditional->operand(1)->shape().tuple_shapes().size(), 2);
EXPECT_EQ(conditional->operand(2)->shape().tuple_shapes().size(), 2);
HloInstruction* call = FindInstruction(module.get(), "call");
EXPECT_EQ(
call->to_apply()->parameter_instruction(0)->shape().tuple_shapes().size(),
4);
}
TEST_F(ConditionalSimplifierTest,
TwoConditionalsCreatedInReversedLexicalOrder) {
absl::string_view hlo_string = R"(
HloModule DeadConditional
computation.1 {
param.1 = s64[] parameter(0)
constant.1 = s64[] constant(1)
ROOT add.1 = s64[] add(param.1, constant.1)
}
computation.2 {
param.2 = s64[] parameter(0)
constant.2 = s64[] constant(2)
ROOT add.2 = s64[] add(param.2, constant.2)
}
computation.3 {
param.3 = s64[] parameter(0)
constant.3 = s64[] constant(3)
ROOT add.3 = s64[] add(param.3, constant.3)
}
computation.4 {
param.4 = s64[] parameter(0)
constant.4 = s64[] constant(4)
ROOT add.4 = s64[] add(param.4, constant.4)
}
ENTRY KernelEntry {
param.1 = s64[] parameter(0)
param.2 = s64[] parameter(1)
param.3 = s64[] parameter(2)
param.4 = pred[] parameter(3)
conditional_1 = s64[] conditional(param.4, param.3, param.2),
true_computation=computation.3, false_computation=computation.4
constant.1 = pred[] constant(false)
ROOT conditional_2 = s64[] conditional(constant.1, conditional_1,
param.1), true_computation=computation.1,
false_computation=computation.2
})";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
std::unique_ptr<HloModule> module = std::move(status).value();
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(module.get()).status());
HloInstruction* conditional_1 =
FindInstruction(module.get(), "conditional_1");
HloInstruction* conditional_1_clone =
conditional_1->parent()->AddInstruction(conditional_1->Clone());
TF_ASSERT_OK(conditional_1->ReplaceAllUsesWith(conditional_1_clone));
TF_ASSERT_OK(conditional_1->parent()->RemoveInstruction(conditional_1));
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
}
TEST_F(ConditionalSimplifierTest, RemoveDeadRoots) {
absl::string_view hlo_string =
R"(
HloModule RemoveDeadRoots
on_false {
t = (f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
rhs = f32[40,40] get-tuple-element(t), index=1
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
after-all = token[] after-all()
outfeed = token[] outfeed(dot, after-all)
ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
t = (f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
add = f32[20,40] add(lhs, lhs)
ROOT result = (f32[20,40]) tuple(add)
}
ENTRY main {
c0_0 = f32[20,40] parameter(0)
c0_1 = f32[40,40] parameter(1)
p = pred[] parameter(2)
t = (f32[20,40], f32[40,40]) tuple(c0_0, c0_1)
conditional = (f32[20, 40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
ROOT result = () tuple()
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0);
}
TEST_F(ConditionalSimplifierTest, SecondTupleElementUnusedAndRemoved) {
absl::string_view hlo_string =
R"(
HloModule SecondTupleElementUnusedAndRemoved
on_true {
arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
copy = f32[10,10]{1,0} copy(get-tuple-element.9)
ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
constant.17 = f32[] constant(0)
constant.18 = f32[] constant(1)
rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
constant.38 = pred[] constant(true)
arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
get-first-index = f32[10,10]{1,0} get-tuple-element(conditional), index=0
ROOT result = (f32[10,10]{1,0}) tuple(get-first-index)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
TEST_F(ConditionalSimplifierTest, FirstTupleElementUnusedAndRemoved) {
absl::string_view hlo_string =
R"(
HloModule FirstTupleElementUnusedAndRemoved
on_true {
arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
copy = f32[10,10]{1,0} copy(get-tuple-element.9)
ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
constant.17 = f32[] constant(0)
constant.18 = f32[] constant(1)
rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
constant.38 = pred[] constant(true)
arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
get-second-index = f32[10,10]{1,0} get-tuple-element(conditional), index=1
ROOT result = (f32[10,10]{1,0}) tuple(get-second-index)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
TEST_F(ConditionalSimplifierTest, MergeDuplicateTupleElements) {
absl::string_view hlo_string =
R"(
HloModule MergeDuplicateTupleElements
on_true {
param-true = (f32[]) parameter(0)
gte-true = f32[] get-tuple-element(param-true), index=0
ROOT tuple-true = (f32[], f32[]) tuple(gte-true, gte-true)
}
on_false {
param-false = (f32[]) parameter(0)
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
rng = f32[] rng(constant.0, constant.1), distribution=rng_uniform
ROOT tuple-false = (f32[], f32[]) tuple(rng, rng)
}
ENTRY main {
comp = pred[] parameter(0)
arg = (f32[]) parameter(1)
conditional = (f32[], f32[]) conditional(comp, arg, arg), true_computation=on_true, false_computation=on_false
gte.0 = f32[] get-tuple-element(conditional), index=0
gte.1 = f32[] get-tuple-element(conditional), index=1
ROOT add = f32[] add(gte.0, gte.1)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
const HloInstruction* gte_0 = FindInstruction(status.value().get(), "gte.0");
const HloInstruction* gte_1 = FindInstruction(status.value().get(), "gte.1");
EXPECT_EQ(gte_0->tuple_index(), 0);
EXPECT_EQ(gte_1->tuple_index(), 0);
}
TEST_F(ConditionalSimplifierTest, SimplifyConditionalWithTokens) {
absl::string_view hlo_string =
R"(
HloModule SimplifyConditionalWithTokens
true_comp {
ROOT parameter.13 = (token[]) parameter(0)
}
false_comp {
ROOT parameter.21 = (token[]) parameter(0)
}
ENTRY entry {
parameter.29 = pred[] parameter(0)
token.1 = token[] after-all()
token.2 = token[] after-all()
tuple.3 = (token[]) tuple(token.1)
tuple.4 = (token[]) tuple(token.2)
ROOT conditional.5 = (token[]) conditional(parameter.29, tuple.3, tuple.4), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloVerifier v(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false);
TF_ASSERT_OK(v.Run(module.get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AfterAll(
op::GetTupleElement(op::Tuple(op::AfterAll()), 0),
op::GetTupleElement(op::Tuple(op::AfterAll()), 0))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a481887-ac82-46c5-9d5c-6fcea92ba784 | cpp | tensorflow/tensorflow | collectives_schedule_linearizer | third_party/xla/xla/service/collectives_schedule_linearizer.cc | third_party/xla/xla/service/collectives_schedule_linearizer_test.cc | #include "xla/service/collectives_schedule_linearizer.h"
#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "tsl/platform/errors.h"
namespace xla {
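// Serializes collectives within each non-fusion computation: walks the
// instructions in post order and adds a control edge from the previous
// collective's done op to the next collective's start op whenever the two are
// not already ordered by data or control dependencies.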
absl::StatusOr<bool> CollectivesScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (is_enabled_ && !is_enabled_(module)) {
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::unique_ptr<HloReachabilityMap> reachability;
HloInstruction* prev_done = nullptr;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto* next = DynCast<HloCollectiveInstruction>(inst);
if (!next) {
continue;
}
if (!reachability) {
reachability = HloReachabilityMap::Build(computation);
}
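// For async collectives (e.g. all-reduce-start/done pairs) the serializing
// edge must end at this op's start instruction but originate from the
// previous collective's done instruction.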
HloInstruction* start = next;
HloInstruction* done = next;
switch (next->opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllGatherStart:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kAsyncStart:
CHECK_EQ(start->user_count(), 1);
done = start->users()[0];
break;
default:
break;
}
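// Only add an edge when the two collectives are not already transitively
// ordered; an edge between connected ops would be redundant or could form a
// cycle.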
if (prev_done && !reachability->IsConnected(start, prev_done)) {
TF_RETURN_IF_ERROR(prev_done->AddControlDependencyTo(next));
VLOG(1) << "Adding control dependency from " << prev_done->ToString()
<< " to " << start->ToString();
changed = true;
}
prev_done = done;
}
}
return changed;
}
} | #include "xla/service/collectives_schedule_linearizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = match;
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
class CollectivesScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCollectivesSchedule(HloModule* module) {
CollectivesScheduleLinearizer collectives_schedule_linearizer;
ASSERT_IS_OK(collectives_schedule_linearizer.Run(module).status());
}
};
TEST_F(CollectivesScheduleLinearizerTest, FixOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
HloInstruction *c1 = nullptr, *c2 = nullptr;
for (HloInstruction* instr : module->entry_computation()->instructions()) {
if (Match(instr, m::AllReduce(m::Parameter(0)))) {
c1 = instr;
}
if (Match(instr, m::AllReduce(m::Parameter(1)))) {
c2 = instr;
}
}
EXPECT_TRUE(c1 != nullptr && c2 != nullptr);
EXPECT_TRUE(absl::c_linear_search(c2->control_predecessors(), c1));
}
TEST_F(CollectivesScheduleLinearizerTest, NoFixRequired) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum, control-predecessors={c1}
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
}
TEST_F(CollectivesScheduleLinearizerTest, DependentCollectives) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(c1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 0);
}
TEST_F(CollectivesScheduleLinearizerTest, NonPostorder) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
c3 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
t = f32[100] add(c1, c2)
ROOT out = f32[100] add(t, c3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(
module->entry_computation()
->GetInstructionWithName("c3")
->AddControlDependencyTo(
module->entry_computation()->GetInstructionWithName("c1")));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 2);
}
TEST_F(CollectivesScheduleLinearizerTest, AsyncOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
ars0 = f32[100] all-reduce-start(p0), replica_groups={}, to_apply=sum
ard0 = f32[100] all-reduce-done(ars0)
ars1 = f32[100] all-reduce-start(p1), replica_groups={}, to_apply=sum
ard1 = f32[100] all-reduce-done(ars1)
ROOT out = f32[100] add(ard0, ard1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
const HloInstruction *root = module->entry_computation()->root_instruction();
const HloInstruction *ard0 = root->operand(0);
const HloInstruction *ard1 = root->operand(1);
EXPECT_EQ(ard0->opcode(), HloOpcode::kAllReduceDone);
EXPECT_EQ(ard1->opcode(), HloOpcode::kAllReduceDone);
const HloInstruction *ars1 = ard1->operand(0);
EXPECT_EQ(ars1->opcode(), HloOpcode::kAllReduceStart);
EXPECT_TRUE(absl::c_linear_search(ars1->control_predecessors(), ard0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collectives_schedule_linearizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collectives_schedule_linearizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7cf7d152-112a-442f-9cb2-370feb836587 | cpp | tensorflow/tensorflow | gather_expander | third_party/xla/xla/service/gather_expander.cc | third_party/xla/xla/service/gather_expander_test.cc | #include "xla/service/gather_expander.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
namespace xla {
namespace {
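// Transposes `start_indices` so that the index vector dimension, if present,
// becomes the most minor dimension. Scalar indices are returned unchanged.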
absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* start_indices, int64_t index_vector_dim) {
const Shape& start_indices_shape = start_indices->shape();
if (start_indices_shape.dimensions_size() == index_vector_dim) {
return start_indices;
}
if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) {
return start_indices;
}
std::vector<int64_t> permutation;
permutation.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
permutation.push_back(i);
}
}
permutation.push_back(index_vector_dim);
return MakeTransposeHlo(start_indices, permutation);
}
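// Canonicalizes `start_indices` by collapsing all batch dimensions into a
// single major dimension, yielding shape [N, index_vector_size] for vector
// indices or [N] for scalar indices.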
absl::StatusOr<HloInstruction*> CanonicalizeGatherIndices(
HloInstruction* start_indices, int64_t index_vector_dim) {
TF_ASSIGN_OR_RETURN(
HloInstruction * transposed_start_indices,
TransposeIndexVectorDimToLast(start_indices, index_vector_dim));
bool indices_are_scalar =
index_vector_dim == start_indices->shape().dimensions_size();
const int64_t index_dims_in_start_indices = indices_are_scalar ? 0 : 1;
const Shape& shape = transposed_start_indices->shape();
if (shape.dimensions_size() == index_dims_in_start_indices) {
return PrependDegenerateDims(transposed_start_indices, 1);
} else {
return CollapseFirstNDims(
transposed_start_indices,
shape.dimensions_size() - index_dims_in_start_indices);
}
}
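// Undoes the batch-dimension collapsing done by CanonicalizeGatherIndices:
// expands the leading accumulator dimension back into the original batch
// dimensions of `start_indices` (or drops it when there are none).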
absl::StatusOr<HloInstruction*> AdjustBatchDimsInAccumulator(
const Shape& start_indices_shape, HloInstruction* accumulator,
int64_t index_vector_dim) {
std::vector<int64_t> batch_dim_bounds;
batch_dim_bounds.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
batch_dim_bounds.push_back(start_indices_shape.dimensions(i));
}
}
if (batch_dim_bounds.empty()) {
return ElideDegenerateDims(accumulator, {0});
}
return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds);
}
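// Expands an index vector from the canonicalized start_indices into a full
// operand-rank start index, inserting zeros for operand dimensions that are
// not covered by start_index_map.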
absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
HloInstruction* index_vector, const GatherDimensionNumbers& dim_numbers,
int64_t operand_rank) {
HloComputation* computation = index_vector->parent();
const Shape& index_shape = index_vector->shape();
if (operand_rank == 0) {
return computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
}
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
std::vector<HloInstruction*> expanded_index_components;
for (int i = 0; i < operand_rank; i++) {
int64_t index_vector_dim_index =
FindIndex(dim_numbers.start_index_map(), i);
if (index_vector_dim_index != dim_numbers.start_index_map_size()) {
TF_ASSIGN_OR_RETURN(
HloInstruction * component_to_concat,
MakeSliceHlo(index_vector, {index_vector_dim_index},
{index_vector_dim_index + 1},
/*strides=*/{1}));
expanded_index_components.push_back(component_to_concat);
} else {
expanded_index_components.push_back(zero);
}
}
return MakeConcatHlo(expanded_index_components, 0);
}
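// Body of the while loop that implements the gather: extracts the index
// vector for the current iteration, dynamic-slices the operand with it, and
// writes the (collapsed) slice into the accumulator at the induction
// variable's offset.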
absl::StatusOr<std::vector<HloInstruction*>> GatherLoopBody(
const HloInstruction& gather, HloInstruction* induction_var,
const std::vector<HloInstruction*>& incoming_loop_state) {
const GatherDimensionNumbers& dim_numbers = gather.gather_dimension_numbers();
CHECK_EQ(incoming_loop_state.size(), 3);
HloInstruction* const operand = incoming_loop_state[0];
HloInstruction* const start_indices = incoming_loop_state[1];
HloInstruction* const output_accumulator = incoming_loop_state[2];
bool has_scalar_indices = start_indices->shape().dimensions_size() == 1;
CHECK_EQ(has_scalar_indices,
dim_numbers.index_vector_dim() ==
gather.operand(1)->shape().dimensions_size());
HloInstruction* induction_var_as_vector =
MakeBroadcastHlo(induction_var, /*broadcast_dimensions=*/{},
/*result_shape_bounds=*/{1});
HloInstruction* index_vector;
if (has_scalar_indices) {
TF_ASSIGN_OR_RETURN(
index_vector,
MakeDynamicSliceHlo(start_indices, induction_var_as_vector, {1}));
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_start_indices,
PadVectorWithZeros(induction_var_as_vector,
/*zeros_to_prepend=*/0, /*zeros_to_append=*/1));
int64_t index_vector_size = start_indices->shape().dimensions(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * index_vector_2d,
MakeDynamicSliceHlo(start_indices, index_into_start_indices,
{1, index_vector_size}));
TF_ASSIGN_OR_RETURN(index_vector,
ElideDegenerateDims(index_vector_2d, {0}));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * gathered_slice_start,
ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers,
operand->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(HloInstruction * gathered_slice,
MakeDynamicSliceHlo(operand, gathered_slice_start,
gather.gather_slice_sizes()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_with_dims_collapsed,
ElideDegenerateDims(gathered_slice, dim_numbers.collapsed_slice_dims()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_for_update,
PrependDegenerateDims(gathered_slice_with_dims_collapsed, 1));
TF_ASSIGN_OR_RETURN(
HloInstruction* const index_vector_into_accumulator,
PadVectorWithZeros(
induction_var_as_vector, /*zeros_to_prepend=*/0,
/*zeros_to_append=*/
gathered_slice_with_dims_collapsed->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const updated_accumulator,
MakeDynamicUpdateSliceHlo(output_accumulator, gathered_slice_for_update,
index_vector_into_accumulator));
return absl::StatusOr<std::vector<HloInstruction*>>{
{operand, start_indices, updated_accumulator}};
}
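// Creates the zero-initialized loop accumulator of shape
// [trip_count, <slice sizes with collapsed dims removed>].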
HloInstruction* CreateGatherLoopAccumulatorInitValue(
HloComputation* computation, PrimitiveType element_type,
absl::Span<const int64_t> slice_sizes, int64_t gather_loop_trip_count,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> accumulator_state_shape_dims;
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
accumulator_state_shape_dims.push_back(gather_loop_trip_count);
for (int64_t i = 0; i < slice_sizes.size(); i++) {
if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
accumulator_state_shape_dims.push_back(slice_sizes[i]);
}
}
return BroadcastZeros(computation, element_type,
accumulator_state_shape_dims);
}
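// Transposes the accumulator so that batch and offset dimensions land at the
// positions the gather's output shape requires.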
absl::StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
HloInstruction* accumulator, absl::Span<const int64_t> offset_dims,
int64_t output_rank) {
std::vector<int64_t> permutation;
permutation.reserve(output_rank);
int64_t batch_idx_counter = 0;
int64_t offset_idx_counter = output_rank - offset_dims.size();
for (int64_t i = 0; i < output_rank; i++) {
bool is_offset_dim = absl::c_binary_search(offset_dims, i);
if (is_offset_dim) {
permutation.push_back(offset_idx_counter++);
} else {
permutation.push_back(batch_idx_counter++);
}
}
return MakeTransposeHlo(accumulator, permutation);
}
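// The loop trip count is the product of all start_indices dimensions except
// the index vector dimension.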
int64_t GatherLoopTripCount(HloInstruction* gather_instr) {
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& start_indices_shape = start_indices->shape();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t trip_count = 1;
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != dim_numbers.index_vector_dim()) {
trip_count *= start_indices_shape.dimensions(i);
}
}
return trip_count;
}
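// A gather whose slice sizes cover the entire operand reads the whole operand
// for every index, so it is equivalent to a reshape plus broadcast.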
bool GatherIsBroadcast(HloInstruction* gather_instr) {
return absl::c_equal(gather_instr->gather_slice_sizes(),
gather_instr->operand(0)->shape().dimensions());
}
}
}
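// Rewrites a gather either into a broadcast (when the slices cover the whole
// operand) or into a while loop that gathers one slice per index.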
absl::StatusOr<HloInstruction*> GatherExpander::ExpandInstruction(
HloInstruction* gather_instr) {
CHECK(!ShapeUtil::IsZeroElementArray(gather_instr->shape()));
if (GatherIsBroadcast(gather_instr)) {
if (ShapeUtil::IsZeroElementArray(gather_instr->operand(0)->shape())) {
return MakeScalarLike(gather_instr, 0);
}
Shape broadcast_operand_shape = ShapeUtil::DeleteDimensions(
gather_instr->gather_dimension_numbers().collapsed_slice_dims(),
gather_instr->operand(0)->shape());
TF_ASSIGN_OR_RETURN(HloInstruction * broadcast_operand,
MakeReshapeHlo(broadcast_operand_shape,
gather_instr->mutable_operand(0)));
gather_instr->SetupDerivedInstruction(broadcast_operand);
HloInstruction* broadcast =
MakeBroadcastHlo(broadcast_operand,
gather_instr->gather_dimension_numbers().offset_dims(),
gather_instr->shape());
gather_instr->SetupDerivedInstruction(broadcast);
return broadcast;
}
HloComputation* computation = gather_instr->parent();
HloInstruction* operand = gather_instr->mutable_operand(0);
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& output_shape = gather_instr->shape();
int64_t output_rank = output_shape.dimensions_size();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t gather_loop_trip_count = GatherLoopTripCount(gather_instr);
if (!IsInt32(gather_loop_trip_count)) {
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
gather_instr->ToString());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * canonical_start_indices,
CanonicalizeGatherIndices(start_indices, dim_numbers.index_vector_dim()));
CHECK_EQ(gather_loop_trip_count,
canonical_start_indices->shape().dimensions(0));
HloInstruction* accumulator_init = CreateGatherLoopAccumulatorInitValue(
computation, output_shape.element_type(),
gather_instr->gather_slice_sizes(), gather_loop_trip_count,
gather_instr->gather_dimension_numbers());
absl::StatusOr<std::vector<HloInstruction*>> gather_loop_result_or_error =
WhileUtil::MakeCountedLoop(
computation, gather_loop_trip_count,
{operand, canonical_start_indices, accumulator_init},
[&](HloInstruction* indvar,
const std::vector<HloInstruction*>& loop_state) {
return GatherLoopBody(*gather_instr, indvar, loop_state);
},
gather_instr->metadata());
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> gather_loop_result,
gather_loop_result_or_error);
HloInstruction* accumulator_result = gather_loop_result.back();
TF_ASSIGN_OR_RETURN(
HloInstruction* const accumulator_with_batch_dims_decanonicalized,
AdjustBatchDimsInAccumulator(start_indices->shape(), accumulator_result,
dim_numbers.index_vector_dim()));
return PermuteBatchAndOffsetDims(accumulator_with_batch_dims_decanonicalized,
dim_numbers.offset_dims(), output_rank);
}
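// Matches gathers with a non-empty result. In kEliminateSimpleGathers mode,
// only single-iteration or broadcast-equivalent gathers are expanded.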
bool GatherExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kGather &&
!ShapeUtil::IsZeroElementArray(inst->shape()) &&
(mode_ == kEliminateAllGathers || GatherLoopTripCount(inst) == 1 ||
absl::c_equal(inst->gather_slice_sizes(),
inst->operand(0)->shape().dimensions()));
}
} | #include "xla/service/gather_expander.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
using GatherExpanderTest = HloTestBase;
TEST_F(GatherExpanderTest, ErrorStatusOnTooManyIndices) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2147483647,5] parameter(1)
ROOT gather = s32[2147483647,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
absl::Status status = GatherExpander{GatherExpander::kEliminateAllGathers}
.Run(module.get())
.status();
EXPECT_EQ(status.code(), tsl::error::UNIMPLEMENTED);
ASSERT_THAT(
status.message(),
::testing::HasSubstr("Gather operations with more than 2147483647 gather "
"indices are not supported."));
}
TEST_F(GatherExpanderTest, AvoidDegenerateDims) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
const Shape& while_shape = while_instr->shape();
ASSERT_TRUE(while_shape.IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {3, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 1)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::GetTupleElementShape(while_shape, 2)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 3)));
}
TEST_F(GatherExpanderTest, CheckOpMetadata) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
OpMetadata metadata;
metadata.set_op_name("Gather");
module->entry_computation()->root_instruction()->set_metadata(metadata);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
EXPECT_EQ(while_instr->metadata().op_name(), "Gather");
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersSkipsNontrivialGather) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_FALSE(changed);
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersRewritesTrivialGather) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[100] parameter(0)
indices = s32[1] parameter(1)
ROOT gather = s32[10] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=0,
slice_sizes={10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateAllGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
}
TEST_F(GatherExpanderTest, GatherIsBroadcast) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[1,3] parameter(0)
indices = s32[7,5] parameter(1)
ROOT gather = s32[7,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
ASSERT_TRUE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kBroadcast}));
module->VerifyOrAddFailure("after-gather-expander.");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3393ebe0-0d02-4328-8e95-0a40a35cb820 | cpp | tensorflow/tensorflow | tree_reduction_rewriter | third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc | third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc | #include "xla/service/gpu/transforms/tree_reduction_rewriter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::InlinedVector<int64_t, 2> GetSortedReducedDims(
HloReduceInstruction *reduce) {
absl::InlinedVector<int64_t, 2> reduced_dims{reduce->dimensions().begin(),
reduce->dimensions().end()};
absl::c_sort(reduced_dims);
return reduced_dims;
}
bool IsMinMaxReduction(HloReduceInstruction *reduce) {
  HloComputation *called = reduce->to_apply();
if (auto reduction_kind = MatchReductionComputation(called)) {
return reduction_kind == ReductionKind::MAX ||
reduction_kind == ReductionKind::MIN;
}
return false;
}
}
class ReductionRewriterVisitor : public DfsHloRewriteVisitor {
public:
explicit ReductionRewriterVisitor(se::GpuComputeCapability gpu_version)
: gpu_version_(gpu_version) {}
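  // If the reduction cannot be emitted race-free as-is, either peel an
  // oversized batch dimension into its own reduce, or split the innermost
  // reduced dimension in two and reduce in two stages.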
absl::Status HandleReduce(HloInstruction *hlo) override {
auto *reduce = Cast<HloReduceInstruction>(hlo);
VLOG(3) << "Reduction instruction: " << reduce->ToString();
const HloModuleConfig &config = reduce->GetModule()->config();
if (!MatchReductionForSplit(reduce, config)) {
return absl::OkStatus();
}
ReductionDimensions reduction_dims =
GetReductionKindAndContiguousComponents(*hlo);
if (ReductionIsRaceFree(config, reduction_dims)) {
VLOG(3) << "Base case: dimensions fit";
return absl::OkStatus();
}
auto sorted_dims_to_reduce = GetSortedReducedDims(reduce);
CHECK_LE(sorted_dims_to_reduce.size(), 2);
if (reduction_dims.is_row_reduction &&
reduction_dims
.dimensions[ReductionDimensions::kRowMajorReducedDimension] >
BatchedReductionRaceFreeBound()) {
VLOG(2) << "Splitting batched dimension reduce into a separate reduction";
return RewriteBatchDimensionLargerThanTile(reduce, reduction_dims,
sorted_dims_to_reduce);
}
SplitParams split_params =
ComputeSplitParams(reduce, reduction_dims, sorted_dims_to_reduce);
return SplitReductionDimension(reduce, split_params, sorted_dims_to_reduce);
}
private:
bool MatchReductionForSplit(HloReduceInstruction *reduce,
const HloModuleConfig &config) {
bool reductions_via_mlir_disabled =
config.debug_options().xla_gpu_mlir_emitter_level() < 4;
if (reductions_via_mlir_disabled && IsMinMaxReduction(reduce)) {
VLOG(1) << "Not performing tree expansion on min/max-reduction: "
<< reduce->ToString()
<< " since min/max operations are associative";
return false;
}
if (!IsReductionFromOrToContiguousDimensions(*reduce)) {
VLOG(3) << "Is not a reduction from or to contiguous dimensions";
return false;
}
VLOG(3) << "Perform rewrite";
return true;
}
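  // Heuristic deciding whether the larger factor k1 should become the inner
  // (minor-most) reduced dimension after the split. The swap is only allowed
  // if k1 itself fits within the race-free bound; for row reductions the
  // remaining conditions try to keep the inner sizes even so that loads can
  // be vectorized.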
bool ShouldSwapInnerAndOuterReducedMinorDimension(uint64_t k1, uint64_t k2,
uint64_t n,
int64_t race_free_bound,
bool is_row_reduction) {
    CHECK_GE(k1, k2);
if (k1 > race_free_bound) {
return false;
}
if (is_row_reduction) {
bool maybe_vectorized = k2 % 2 == 0 && n % 2 == 0;
if (maybe_vectorized) {
return k2 * 2 < k1 || k1 % 2 == 0;
}
return n % 2 == 0 || k1 % 2 != 0;
}
return true;
}
struct SplitParams {
int64_t k1;
int64_t k2;
int64_t dim;
};
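  // Chooses the k1 x k2 factorization of the innermost reduced dimension:
  // start from k2 ~= sqrt(dim_size), clamp it to the race-free bound, then
  // scan nearby candidates for the factorization needing the least padding,
  // preferring power-of-two candidates on ties.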
SplitParams ComputeSplitParams(
HloReduceInstruction *reduce, const ReductionDimensions &reduction_dims,
absl::Span<const int64_t> sorted_dims_to_reduce) {
absl::Span<int64_t const> input_shape_dims =
reduce->inputs()[0]->shape().dimensions();
int64_t reduced_dim = sorted_dims_to_reduce.back();
int64_t reduced_dim_size = input_shape_dims[reduced_dim];
VLOG(3) << "reduced dim size = " << reduced_dim_size;
uint64_t k2 =
static_cast<uint64_t>(std::floor(std::sqrt(reduced_dim_size)));
int64_t race_free_bound = ReductionDimensionRaceFreeBound(
reduce->GetModule()->config(), reduction_dims);
if (k2 > race_free_bound) {
k2 = race_free_bound;
}
uint64_t minimum_padding = (k2 - reduced_dim_size % k2) % k2;
uint64_t best_k1 = (reduced_dim_size + minimum_padding) / k2;
for (uint64_t i = k2 - 1; i > k2 / 2; --i) {
uint64_t padding = (i - reduced_dim_size % i) % i;
if (padding < minimum_padding ||
(padding == minimum_padding && absl::has_single_bit(i))) {
minimum_padding = padding;
best_k1 = (reduced_dim_size + padding) / i;
}
}
uint64_t padded_k = reduced_dim_size + minimum_padding;
uint64_t best_k2 = padded_k / best_k1;
if (ShouldSwapInnerAndOuterReducedMinorDimension(
best_k1, best_k2, reduced_dim_size, race_free_bound,
reduction_dims.is_row_reduction)) {
std::swap(best_k1, best_k2);
}
return SplitParams{static_cast<int64_t>(best_k1),
static_cast<int64_t>(best_k2), reduced_dim};
}
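  // Materializes the split: pads the chosen dimension up to k1 * k2 with the
  // reduction's init values, bitcasts each input so that dimension becomes
  // two dimensions (k1, k2), reduces over the inner dimensions, and finally
  // reduces the leftover k1 dimension down to the original output shape.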
absl::Status SplitReductionDimension(
HloReduceInstruction *reduce, const SplitParams &split_params,
absl::Span<const int64_t> sorted_dims_to_reduce) {
absl::Span<int64_t const> reduce_input_dims =
reduce->inputs()[0]->shape().dimensions();
int64_t split_dim_size = reduce_input_dims[split_params.dim];
VLOG(2) << "dimension to split = " << split_params.dim << " with "
<< split_dim_size << " elements into " << split_params.k1 << " by "
<< split_params.k2;
HloInstruction::InstructionVector padded_inputs(reduce->inputs().begin(),
reduce->inputs().end());
auto padded_size = split_params.k1 * split_params.k2;
absl::InlinedVector<int64_t, 3> padded_dimensions(reduce_input_dims.begin(),
reduce_input_dims.end());
if (split_dim_size != padded_size) {
padded_dimensions[split_params.dim] = padded_size;
PaddingConfig padding_config =
MakeNoPaddingConfig(reduce_input_dims.size());
padding_config.mutable_dimensions(split_params.dim)
->set_edge_padding_high(padded_size - split_dim_size);
for (int input_idx = 0; input_idx < padded_inputs.size(); ++input_idx) {
auto &reduction_input = padded_inputs[input_idx];
Shape padded_shape = ShapeUtil::MakeShape(
reduction_input->shape().element_type(), padded_dimensions);
VLOG(2) << "Generated padded shape: " << padded_shape.ToString();
reduction_input = reduce->parent()->AddInstruction(
HloInstruction::CreatePad(padded_shape, reduction_input,
reduce->init_values()[input_idx],
padding_config),
&reduction_input->metadata());
}
}
absl::InlinedVector<int64_t, 3> reshaped_dimensions;
int64_t input_rank = reduce_input_dims.size();
for (int64_t dim_idx = 0; dim_idx < input_rank; dim_idx++) {
if (dim_idx == split_params.dim) {
reshaped_dimensions.push_back(split_params.k1);
reshaped_dimensions.push_back(split_params.k2);
} else {
reshaped_dimensions.push_back(padded_dimensions[dim_idx]);
}
}
absl::InlinedVector<int64_t, 2> inner_reduce_dims(
sorted_dims_to_reduce.begin(), sorted_dims_to_reduce.end());
auto split_dim_it = std::find(inner_reduce_dims.begin(),
inner_reduce_dims.end(), split_params.dim);
*split_dim_it += 1;
absl::InlinedVector<int64_t, 1> outer_reduce_dims{
split_params.dim -
std::distance(inner_reduce_dims.begin(), split_dim_it)};
absl::InlinedVector<int64_t, 3> inner_reduce_shape =
RemoveElements(inner_reduce_dims, reshaped_dimensions);
HloInstruction::InstructionVector reshaped_padded_inputs;
absl::InlinedVector<Shape, 2> inner_reduce_shapes;
for (HloInstruction *padded_input : padded_inputs) {
Shape reshaped_shape = ShapeUtil::MakeShape(
padded_input->shape().element_type(), reshaped_dimensions);
HloInstruction *reshaped_padded_input = reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(reshaped_shape, padded_input),
&padded_input->metadata());
VLOG(2) << "Generated reshape: " << reshaped_padded_input->ToString();
reshaped_padded_inputs.push_back(reshaped_padded_input);
inner_reduce_shapes.push_back(ShapeUtil::MakeShape(
padded_input->shape().element_type(), inner_reduce_shape));
}
HloInstruction *inner_reduce = reduce->parent()->AddInstruction(
HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(inner_reduce_shapes),
reshaped_padded_inputs, reduce->init_values(), inner_reduce_dims,
reduce->to_apply()),
&reduce->metadata());
VLOG(1) << "Generated inner reduction: " << inner_reduce->ToString();
std::unique_ptr<HloInstruction> outer_reduce = HloInstruction::CreateReduce(
reduce->shape(), inner_reduce, reduce->init_values(), outer_reduce_dims,
reduce->to_apply());
VLOG(1) << "Generated outer reduction: " << outer_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(outer_reduce));
}
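  // For row reductions whose batch dimension exceeds the supported bound:
  // reduce only the minor-most reduced dimension first, then reduce what was
  // the batch dimension (dimension 0 of the intermediate) in a second step.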
absl::Status RewriteBatchDimensionLargerThanTile(
HloReduceInstruction *hlo,
const ReductionDimensions &reduction_dimensions,
absl::Span<const int64_t> sorted_dims_to_reduce) {
CHECK(reduction_dimensions.is_row_reduction);
absl::InlinedVector<Shape, 2> tuple_shapes;
int64_t minor_reduction_dim = sorted_dims_to_reduce.back();
for (HloInstruction *input : hlo->inputs()) {
tuple_shapes.push_back(
ShapeUtil::DeleteDimension(minor_reduction_dim, input->shape()));
}
HloInstruction *inner_reduce =
hlo->parent()->AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(tuple_shapes), hlo->inputs(),
hlo->init_values(), {minor_reduction_dim}, hlo->to_apply()));
VLOG(1) << "Inner reduction: " << inner_reduce->ToString();
std::unique_ptr<HloInstruction> out = HloInstruction::CreateReduce(
hlo->shape(), inner_reduce, hlo->init_values(), {0}, hlo->to_apply());
VLOG(1) << "Generated: " << out->ToString();
return ReplaceWithNewInstruction(hlo, std::move(out));
}
se::GpuComputeCapability gpu_version_;
};
absl::StatusOr<bool> TreeReductionRewriter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
VLOG(5) << "Rewriter input: " << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
ReductionRewriterVisitor(gpu_version_)
.RunOnModule(module, execution_threads));
VLOG(5) << "Rewriter output: " << module->ToString();
return changed;
}
}
} | #include "xla/service/cpu/tests/cpu_codegen_test.h"
#include "tsl/platform/test.h"
namespace xla {
namespace cpu {
namespace {
class TreeReductionRewriterTest : public CpuCodegenTest {};
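// On CPU the tree reduction rewrite lowers a large reduce into a
// reduce-window over fixed-size tiles followed by a small reduce over the
// partial results, which is the two-step pattern the CHECK lines below match.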
TEST_F(TreeReductionRewriterTest, SimpleRewrite) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[1000] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %main (input: f32[1000]) -> f32[] {
; CHECK-NEXT: [[INSTR_0:%[^ ]+]] = f32[1000]{0} parameter(0)
; CHECK-NEXT: [[INSTR_1:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[INSTR_2:%[^ ]+]] = f32[32]{0} reduce-window([[INSTR_0]], [[INSTR_1]]), window={size=32 stride=32 pad=12_12}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_2]], [[INSTR_1]]), dimensions={0}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensions) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[100,100] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[INSTR_0:%[^ ]+]] = f32[4,4]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x32 stride=32x32 pad=14_14x14_14}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensionsSingleSmaller) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[1000,31] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[INSTR_0:%[^ ]+]] = f32[32,1]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x31 stride=32x31 pad=12_12x0_0}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, NoRewriteRequired) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[31,31] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, NoRewriteRequiredZeroDim) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[3000,0] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b94443b4-6fb3-4c90-8ccc-d0cf952d9da3 | cpp | tensorflow/tensorflow | hlo_alias_analysis | third_party/xla/xla/service/hlo_alias_analysis.cc | third_party/xla/xla/service/hlo_alias_analysis_test.cc | #include "xla/service/hlo_alias_analysis.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/map_util.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
using absl::StrAppend;
namespace {
using FlatValueSet = absl::flat_hash_set<const HloValue*>;
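// If `value` appears at a root position of the entry computation that the
// module's input/output alias config pairs with a parameter, the value
// defined at that parameter position must share the same buffer.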
void ComputeInputOutputAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
const HloModule& module = dataflow.module();
const HloComputation& entry_computation = *module.entry_computation();
const HloInputOutputAliasConfig& io_alias_config =
module.input_output_alias_config();
for (const HloPosition& pos : value.positions()) {
if (pos.instruction == entry_computation.root_instruction()) {
std::optional<HloInputOutputAliasConfig::Alias> aliased_input =
io_alias_config.GetAliasedParameter(pos.index);
if (aliased_input) {
aliased_values.insert(
&dataflow.GetUniqueValueAt(entry_computation.parameter_instruction(
aliased_input->parameter_number),
aliased_input->parameter_index));
}
}
}
}
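// A kWhile forces buffer sharing in three places: its init operand, the
// parameter of its body and condition computations, and the root of its
// body must all alias the while instruction's own value.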
void ComputeWhileAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute kWhile aliases";
for (const HloUse& use : value.GetUses()) {
if (use.instruction->opcode() == HloOpcode::kWhile) {
const HloValue& while_value =
dataflow.GetUniqueValueAt(use.instruction, use.operand_index);
aliased_values.insert(&while_value);
VLOG(3) << " value is init value to a while; must share buffer with "
"while value "
<< while_value;
}
}
if (value.defining_instruction()->opcode() == HloOpcode::kParameter) {
const HloComputation* computation = value.defining_instruction()->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1);
const HloValue& while_value = dataflow.GetUniqueValueAt(
callsite.instruction(), value.defining_index());
VLOG(3) << " value is parameter value of the body or condition of a "
"while; must share buffer with while value "
<< while_value;
aliased_values.insert(&while_value);
}
}
}
for (const HloPosition& position : value.positions()) {
if (!position.instruction->IsRoot()) continue;
const HloComputation* computation = position.instruction->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile &&
callsite.instruction()->while_body() == computation) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1)
<< "Call graph must have been flattened.";
const HloValue& while_value =
dataflow.GetUniqueValueAt(callsite.instruction(), position.index);
VLOG(3) << " value @ " << position << " is root of "
<< callsite.instruction()->name()
<< "; body root and while value root must share buffer "
"among them: "
<< while_value;
aliased_values.insert(&while_value);
}
}
}
}
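// The root of every branch computation of a kConditional must share a
// buffer with the conditional's value at the same shape index.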
void ComputeConditionalAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute kConditional aliases";
for (const HloPosition& position : value.positions()) {
if (!position.instruction->IsRoot()) continue;
const HloComputation* computation = position.instruction->parent();
const CallGraphNode& call_graph_node =
dataflow.call_graph().GetNode(computation);
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kConditional) {
CHECK_EQ(call_graph_node.caller_callsites().size(), 1);
const HloValue& cond_value =
dataflow.GetUniqueValueAt(callsite.instruction(), position.index);
VLOG(3) << " value @ " << position << " is root of "
<< callsite.instruction()->name()
<< "; branch computation roots must share buffer among them : "
<< cond_value;
aliased_values.insert(&cond_value);
}
}
}
}
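// In-place ops pair an operand slot with an output slot; both directions are
// handled below: positions of `value` that are such outputs alias the
// corresponding operand value, and uses of `value` feeding such operand
// slots alias the corresponding output value.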
void ComputeInPlaceOperationAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
VLOG(3) << "Compute aliases for in-place operations (e.g. "
"kDynamicUpdateSlice and kScatter)";
for (const HloPosition& position : value.positions()) {
HloInstruction* instruction = position.instruction;
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(instruction)) {
if (position.index == operand_and_output_index.second) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
const HloValue& operand_value = dataflow.GetUniqueValueAt(
instruction->operand(operand_index.operand_number),
operand_index.operand_index);
VLOG(3) << " operand value " << operand_value << " aliases.";
aliased_values.insert(&operand_value);
}
}
}
for (const HloUse& use : value.GetUses()) {
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(use.instruction)) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
if (use.operand_number == operand_index.operand_number &&
use.operand_index == operand_index.operand_index) {
const HloValue& use_value = dataflow.GetUniqueValueAt(
use.instruction, operand_and_output_index.second);
VLOG(3) << " use value " << use_value << " aliases.";
aliased_values.insert(&use_value);
}
}
}
}
FlatValueSet ComputeAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow) {
if (VLOG_IS_ON(2)) {
for (const HloUse& use : value.GetUses()) {
VLOG(2) << "Use of value " << value << ": " << use;
}
}
FlatValueSet aliased_values{&value};
ComputeInputOutputAliasedValues(value, dataflow, aliased_values);
ComputeWhileAliasedValues(value, dataflow, aliased_values);
ComputeConditionalAliasedValues(value, dataflow, aliased_values);
ComputeInPlaceOperationAliasedValues(value, dataflow, aliased_values);
return aliased_values;
}
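// Partitions every HloValue into an HloBuffer. Each value starts in its own
// singleton set; for each value the set of must-alias values is computed and
// their sets are unioned, always folding the smaller sets into the largest
// one (ties broken by smallest value id) to keep the merges cheap.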
std::vector<HloBuffer> CreateBuffers(const HloDataflowAnalysis& dataflow) {
const std::vector<HloValue*>& values = dataflow.values();
size_t num_buffers = values.size();
std::vector<FlatValueSet> buffer_values(values.size());
absl::flat_hash_map<const HloValue*, FlatValueSet*> value_to_set;
value_to_set.reserve(values.size());
for (size_t i = 0; i < values.size(); ++i) {
buffer_values[i].insert(values[i]);
value_to_set[values[i]] = &buffer_values[i];
}
for (const HloValue* value : values) {
VLOG(3) << "Merging colocated values, value: " << *value;
FlatValueSet aliased_values = ComputeAliasedValues(*value, dataflow);
if (aliased_values.size() < 2) continue;
std::vector<std::pair<FlatValueSet*, HloValue::Id>> aliased_sets;
aliased_sets.reserve(aliased_values.size());
for (const HloValue* aliased : aliased_values) {
aliased_sets.push_back({value_to_set[aliased], aliased->id()});
}
auto key = [](const auto& set_and_id) {
return std::make_pair(set_and_id.first->size(), -set_and_id.second);
};
FlatValueSet* union_set =
absl::c_max_element(aliased_sets, LessThanByKey(key))->first;
for (auto& aliased_set_and_id : aliased_sets) {
FlatValueSet* aliased_set = aliased_set_and_id.first;
if ((aliased_set != union_set) && !aliased_set->empty()) {
for (const HloValue* aliased_value : *aliased_set) {
CHECK(union_set->insert(aliased_value).second);
value_to_set[aliased_value] = union_set;
}
aliased_set->clear();
--num_buffers;
}
}
}
std::vector<HloBuffer> buffers;
buffers.reserve(num_buffers);
for (const FlatValueSet& value_set : buffer_values) {
if (!value_set.empty()) {
HloBuffer::Id id = buffers.size();
buffers.push_back({id, HloValueSet(value_set).TakeValues()});
}
}
CHECK_EQ(buffers.size(), num_buffers);
return buffers;
}
}
HloAliasAnalysis::HloAliasAnalysis(const HloModule* module) : module_(module) {}
const HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
std::vector<const HloBuffer*> buffers = ComputeBuffersAt(instruction, index);
CHECK_EQ(buffers.size(), 1);
return *buffers[0];
}
HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
const HloInstruction* instruction, const ShapeIndex& index) {
return GetBuffer(const_cast<const HloAliasAnalysis*>(this)
->GetUniqueBufferAt(instruction, index)
.id());
}
std::vector<const HloBuffer*> HloAliasAnalysis::ComputeBuffersAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
const HloValueSet& value_set =
dataflow_analysis_->GetValueSet(instruction, index);
std::vector<const HloBuffer*> buffers;
buffers.reserve(value_set.values().size());
for (const HloValue* value : value_set.values()) {
buffers.push_back(&GetBufferContainingValue(*value));
}
absl::c_sort(buffers, HloBuffer::IdLessThan);
buffers.erase(std::unique(buffers.begin(), buffers.end()), buffers.end());
return buffers;
}
absl::Status HloAliasAnalysis::Verify() const {
for (const auto& pair : value_to_buffer_) {
const HloValue* value = pair.first;
const HloBuffer& buffer = *pair.second;
TF_RET_CHECK(absl::c_linear_search(buffer.values(), value));
}
for (HloBuffer::Id id = 0; id < buffers_.size(); ++id) {
const HloBuffer& buffer = buffers_[id];
TF_RET_CHECK(buffer.id() == id);
HloValue::Id last_value_id = -1;
for (const HloValue* value : buffer.values()) {
TF_RET_CHECK(GetBufferContainingValue(*value) == buffer);
TF_RET_CHECK(value->id() > last_value_id);
last_value_id = value->id();
}
}
return absl::OkStatus();
}
std::string HloAliasAnalysis::ToString() const {
std::string out =
absl::StrCat("HloAliasAnalysis, module ", module_->name(), "\n");
StrAppend(&out, " Buffers at each position:\n");
for (const HloComputation* computation : module_->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
StrAppend(&out, " ", instruction->name(), ":\n");
if (instruction->shape().IsTuple()) {
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&out, &instruction, this](const Shape&, const ShapeIndex& index) {
StrAppend(&out, " tuple index ", index.ToString(), ":\n");
for (const HloBuffer* buffer :
ComputeBuffersAt(instruction, index)) {
StrAppend(&out, " ", buffer->ToString(), "\n");
}
});
} else {
        for (const HloBuffer* buffer :
             ComputeBuffersAt(instruction, /*index=*/{})) {
StrAppend(&out, " ", buffer->ToString(), "\n");
}
}
}
}
StrAppend(&out, " Buffers:\n");
for (const HloBuffer& buffer : buffers()) {
StrAppend(&out, " ", buffer.ToString(), "\n");
StrAppend(&out, " positions:\n");
for (const HloPosition& position : buffer.ComputePositions()) {
StrAppend(&out, " ", position.ToString(), "\n");
}
}
return out;
}
absl::StatusOr<std::unique_ptr<HloAliasAnalysis>> HloAliasAnalysis::Run(
const HloModule* module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
VLOG(2) << "HloAliasAnalysis::Run on module " << module->name();
XLA_VLOG_LINES(2, module->ToString());
auto alias_analysis = absl::WrapUnique(new HloAliasAnalysis(module));
TF_ASSIGN_OR_RETURN(alias_analysis->dataflow_analysis_,
                      HloDataflowAnalysis::Run(*module, /*ssa_form=*/true,
                                               /*bitcast_defines_value=*/false,
                                               can_share_buffer));
size_t num_values = alias_analysis->dataflow_analysis_->values().size();
alias_analysis->buffers_ = CreateBuffers(alias_analysis->dataflow_analysis());
alias_analysis->value_to_buffer_.reserve(num_values);
for (HloBuffer& buffer : alias_analysis->buffers_) {
for (const HloValue* value : buffer.values()) {
alias_analysis->value_to_buffer_[value] = &buffer;
}
}
CHECK_EQ(alias_analysis->value_to_buffer_.size(), num_values);
TF_DCHECK_OK(alias_analysis->Verify());
HloInstruction* root = module->entry_computation()->root_instruction();
  ShapeUtil::ForEachSubshape(root->shape(), [&](const Shape& /*subshape*/,
                                                const ShapeIndex& index) {
std::vector<const HloBuffer*> buffers =
alias_analysis->ComputeBuffersAt(root, index);
alias_analysis->live_out_buffers_.insert(buffers.begin(), buffers.end());
});
XLA_VLOG_LINES(2, alias_analysis->ToString());
return std::move(alias_analysis);
}
} | #include "xla/service/hlo_alias_analysis.h"
#include <memory>
#include <set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
class HloAliasAnalysisTest : public HloTestBase {
protected:
HloAliasAnalysisTest() : HloTestBase() {
module_ = CreateNewVerifiedModule();
}
HloAliasAnalysis& RunAnalysis() {
    analysis_ = HloAliasAnalysis::Run(module_.get(),
                                      /*can_share_buffer=*/nullptr)
                    .value();
return *analysis_;
}
std::vector<HloBuffer> GetBuffersAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
std::set<HloBuffer::Id> buffer_ids;
for (const HloValue* value : analysis_->dataflow_analysis()
.GetValueSet(instruction, index)
.values()) {
buffer_ids.insert(analysis_->GetBufferContainingValue(*value).id());
}
std::vector<HloBuffer> buffers;
buffers.reserve(buffer_ids.size());
for (HloBuffer::Id id : buffer_ids) {
buffers.push_back(analysis_->GetBuffer(id));
}
return buffers;
}
const HloValue& GetValueDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
return analysis_->dataflow_analysis().GetValueDefinedAt(instruction, index);
}
bool AnyValuesInSameBufferInterfere() {
DependencyHloOrdering ordering(module_.get());
for (const HloBuffer& buffer : analysis_->buffers()) {
for (const HloValue* value_a : buffer.values()) {
for (const HloValue* value_b : buffer.values()) {
if (*value_a != *value_b &&
ordering.MayInterfere(*value_a, *value_b,
analysis_->dataflow_analysis())) {
VLOG(1) << *value_a << " interferes with " << *value_b
<< " in buffer: " << buffer;
return true;
}
}
}
}
return false;
}
bool InstructionBuffersAreAmbiguous(const HloInstruction* instruction) const {
for (const auto& pair :
analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
const HloValueSet& value_set = pair.second;
const HloBuffer* buffer = nullptr;
for (const HloValue* value : value_set.values()) {
if (buffer == nullptr) {
buffer = &analysis_->GetBufferContainingValue(*value);
} else if (buffer != &analysis_->GetBufferContainingValue(*value)) {
return true;
}
}
}
return false;
}
bool InstructionBuffersAreDistinct(const HloInstruction* instruction) const {
absl::flat_hash_set<const HloBuffer*> buffers_seen;
for (const auto& pair :
analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
const HloValueSet& value_set = pair.second;
absl::flat_hash_set<const HloBuffer*> buffers_at_this_index;
for (const HloValue* value : value_set.values()) {
buffers_at_this_index.insert(
&analysis_->GetBufferContainingValue(*value));
}
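      // flat_hash_set::merge only extracts elements not already present in
      // `buffers_seen`; anything left behind is a buffer that also appeared
      // at an earlier shape index, so the buffers are not distinct.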
buffers_seen.merge(buffers_at_this_index);
if (!buffers_at_this_index.empty()) return false;
}
return true;
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloAliasAnalysis> analysis_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(HloAliasAnalysisTest, BinaryOperation) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.buffers().size(), 3);
for (const HloInstruction* instruction : {constant1, constant2, add}) {
EXPECT_EQ(analysis.GetUniqueBufferAt(instruction).GetUniqueValue(),
GetValueDefinedAt(instruction));
}
EXPECT_FALSE(InstructionBuffersAreAmbiguous(add));
EXPECT_TRUE(InstructionBuffersAreDistinct(add));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, TupleAndGtes) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
builder.AddInstruction(
HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.buffers().size(), 4);
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}).GetUniqueValue(),
GetValueDefinedAt(tuple, {}));
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {0}).GetUniqueValue(),
GetValueDefinedAt(param0));
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {1}).GetUniqueValue(),
GetValueDefinedAt(param1));
EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
analysis.GetUniqueBufferAt(gte0));
EXPECT_THAT(
analysis.GetUniqueBufferAt(param0).ComputePositions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{gte0, {}}));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
EXPECT_TRUE(InstructionBuffersAreDistinct(tuple));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, NondistinctTuple) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({param0, param1, param0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(
analysis.GetUniqueBufferAt(param0).ComputePositions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{tuple, {2}}));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
EXPECT_FALSE(InstructionBuffersAreDistinct(tuple));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, ParametersWithAliasing) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{0}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {1}));
}
TEST_F(HloAliasAnalysisTest, ParametersWithCrossAliasing) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{1}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{0}));
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
analysis.GetUniqueBufferAt(tuple, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
analysis.GetUniqueBufferAt(tuple, {1}));
}
TEST_F(HloAliasAnalysisTest, InputOutputAliasingWithWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_tuple = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, param));
auto while_element_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 0));
auto while_element_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 1));
auto negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, while_element_1));
auto negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, while_element_2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate_1, negate_2}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
UnorderedElementsAre(&GetValueDefinedAt(param, {1}),
&GetValueDefinedAt(xla_while, {1}),
&GetValueDefinedAt(body_param, {1}),
&GetValueDefinedAt(cond_param, {1}),
&GetValueDefinedAt(add),
&GetValueDefinedAt(negate_2)));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
UnorderedElementsAre(
HloPosition{param, {1}}, HloPosition{xla_while, {1}},
HloPosition{while_element_2, {}}, HloPosition{body_param, {1}},
HloPosition{body_element_1, {}}, HloPosition{add, {}},
HloPosition{body_tuple, {1}}, HloPosition{tuple, {1}},
HloPosition{cond_param, {1}}, HloPosition{negate_2, {}}));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SingleCall) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
UnorderedElementsAre(HloPosition{constant1, {}},
HloPosition{subparam0, {}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
UnorderedElementsAre(HloPosition{constant2, {}},
HloPosition{subparam1, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(add).ComputePositions(),
UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call, {}}));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {call1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
UnorderedElementsAre(HloPosition{constant1, {}},
HloPosition{subparam0, {}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
UnorderedElementsAre(HloPosition{constant2, {}},
HloPosition{subparam1, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(add).ComputePositions(),
UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call1, {}},
HloPosition{subparam0, {}}, HloPosition{call2, {}}));
EXPECT_THAT(GetBuffersAt(subparam0),
UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(add)));
EXPECT_THAT(GetBuffersAt(subparam1),
UnorderedElementsAre(analysis.GetUniqueBufferAt(constant2)));
EXPECT_TRUE(InstructionBuffersAreAmbiguous(subparam0));
EXPECT_FALSE(InstructionBuffersAreAmbiguous(subparam1));
EXPECT_TRUE(InstructionBuffersAreDistinct(subparam0));
EXPECT_TRUE(InstructionBuffersAreDistinct(subparam1));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SingleWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_tuple = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {}).ComputePositions(),
UnorderedElementsAre(HloPosition{tuple, {}}, HloPosition{xla_while, {}},
HloPosition{body_param, {}},
HloPosition{body_tuple, {}},
HloPosition{cond_param, {}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {0}).ComputePositions(),
UnorderedElementsAre(
HloPosition{constant1, {}}, HloPosition{tuple, {0}},
HloPosition{xla_while, {0}}, HloPosition{body_param, {0}},
HloPosition{body_element_0, {}}, HloPosition{body_tuple, {0}},
HloPosition{cond_param, {0}}));
EXPECT_THAT(
analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
UnorderedElementsAre(
HloPosition{constant2, {}}, HloPosition{tuple, {1}},
HloPosition{xla_while, {1}}, HloPosition{body_param, {1}},
HloPosition{body_element_1, {}}, HloPosition{add, {}},
HloPosition{body_tuple, {1}}, HloPosition{cond_param, {1}}));
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {0}).values(),
UnorderedElementsAre(&GetValueDefinedAt(constant1)));
EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
UnorderedElementsAre(&GetValueDefinedAt(constant2),
&GetValueDefinedAt(xla_while, {1}),
&GetValueDefinedAt(body_param, {1}),
&GetValueDefinedAt(cond_param, {1}),
&GetValueDefinedAt(add)));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SequentialWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
auto xla_while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
auto xla_while2 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
module_->AddEntryComputation(builder.Build());
FlattenCallGraph flattener;
TF_ASSERT_OK(flattener.Run(module_.get()).status());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}),
analysis.GetUniqueBufferAt(xla_while2, {}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(xla_while2, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
analysis.GetUniqueBufferAt(xla_while2, {1}));
}
TEST_F(HloAliasAnalysisTest, NestedWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto build_cond_computation = [&tuple_shape]() {
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return cond_builder.Build();
};
HloComputation* condition1 =
module_->AddEmbeddedComputation(build_cond_computation());
HloComputation* condition2 =
module_->AddEmbeddedComputation(build_cond_computation());
auto inner_builder = HloComputation::Builder("inner_body");
auto inner_param = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto inner_element_0 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
auto inner_element_1 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
inner_builder.AddInstruction(
HloInstruction::CreateTuple({inner_element_0, add}));
HloComputation* inner_body =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("outer_body");
auto outer_param = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto outer_element_0 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0));
auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, outer_element_0));
auto outer_element_1 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1));
auto outer_tuple = outer_builder.AddInstruction(
HloInstruction::CreateTuple({negate, outer_element_1}));
auto nested_while = outer_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition1, inner_body, outer_tuple));
HloComputation* outer_body =
module_->AddEmbeddedComputation(outer_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto entry_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition2, outer_body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(entry_while, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(nested_while, {0}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(inner_element_0));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
analysis.GetUniqueBufferAt(entry_while, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
analysis.GetUniqueBufferAt(nested_while, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
analysis.GetUniqueBufferAt(inner_element_1));
EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, SwizzlingWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto body_element_2 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 2));
body_builder.AddInstruction(HloInstruction::CreateTuple(
{body_element_1, body_element_2, body_element_0}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2, constant3}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_THAT(
analysis.buffers(),
UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(tuple, {}),
analysis.GetUniqueBufferAt(cond_constant)));
EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
analysis.GetUniqueBufferAt(xla_while, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
analysis.GetUniqueBufferAt(xla_while, {2}));
EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
analysis.GetUniqueBufferAt(constant1));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(constant2));
EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
analysis.GetUniqueBufferAt(constant3));
EXPECT_TRUE(AnyValuesInSameBufferInterfere());
}
TEST_F(HloAliasAnalysisTest, Bitcast) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(scalar_shape_, constant));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.buffers().size(), 1);
EXPECT_EQ(analysis.GetUniqueBufferAt(constant),
analysis.GetUniqueBufferAt(bitcast));
}
TEST_F(HloAliasAnalysisTest, DynamicUpdateSlice) {
Shape shape = ShapeUtil::MakeShape(F32, {8});
Shape update_shape = ShapeUtil::MakeShape(F32, {4});
Shape index_shape = ShapeUtil::MakeShape(S32, {});
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, update_shape, "param1"));
auto param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, index_shape, "param2"));
auto copy0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopy, param0));
auto dynamic_update_slice = builder.AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(shape, copy0, param1, {param2}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
HloAliasAnalysis& analysis = RunAnalysis();
EXPECT_EQ(analysis.GetUniqueBufferAt(copy0),
analysis.GetUniqueBufferAt(dynamic_update_slice));
}
TEST_F(HloAliasAnalysisTest, DynamicUpdateSliceMultiOutputFusion) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
param2 = f32[1280,1,128] parameter(2)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add.1 = f32[1280,1,128] add(param0, param0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
negate1 = f32[1280,1,128] negate(param)
negate2 = f32[1280,1,128] negate(param)
ROOT fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
SCOPED_TRACE(module_->ToString());
HloAliasAnalysis& analysis = RunAnalysis();
LOG(INFO) << analysis.ToString();
const HloInstruction* fusion =
module_->entry_computation()->GetInstructionWithName("fusion");
const HloInstruction* negate0 =
module_->entry_computation()->GetInstructionWithName("negate0");
const HloInstruction* negate1 =
module_->entry_computation()->GetInstructionWithName("negate1");
const HloInstruction* negate2 =
module_->entry_computation()->GetInstructionWithName("negate2");
EXPECT_EQ(analysis.GetUniqueBufferAt(negate1),
analysis.GetUniqueBufferAt(fusion, {1}));
EXPECT_EQ(analysis.GetUniqueBufferAt(negate2),
analysis.GetUniqueBufferAt(fusion, {2}));
EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
analysis.GetUniqueBufferAt(fusion, {0}));
}
TEST_F(HloAliasAnalysisTest, ChainedDynamicUpdateSliceFusion) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
ROOT dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
ROOT fusion = f32[1280,1,128] fusion(negate0), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
SCOPED_TRACE(module_->ToString());
HloAliasAnalysis& analysis = RunAnalysis();
LOG(INFO) << analysis.ToString();
const HloInstruction* fusion =
module_->entry_computation()->GetInstructionWithName("fusion");
const HloInstruction* negate0 =
module_->entry_computation()->GetInstructionWithName("negate0");
EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
analysis.GetUniqueBufferAt(fusion));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e59be81d-78e8-4178-b423-a8826f3c6fff | cpp | tensorflow/tensorflow | scatter_simplifier | third_party/xla/xla/service/scatter_simplifier.cc | third_party/xla/xla/service/scatter_simplifier_test.cc | #include "xla/service/scatter_simplifier.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/permutation_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
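// Moves the scatter dimensions of `updates` to the front and collapses them
// into a single leading dimension (inserting one if there are none). Then
// inserts degenerate (size-1) dimensions for each entry of
// `inserted_window_dims`, so the result has rank 1 + operand rank.
// Note: `scatter_indices_size` is currently unused here.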
absl::StatusOr<HloInstruction*> FlattenAndTransposeUpdates(
HloInstruction* updates, absl::Span<const int64_t> update_window_dims,
absl::Span<const int64_t> inserted_window_dims,
int64_t scatter_indices_size) {
int64_t updates_rank = updates->shape().rank();
std::vector<int64_t> permutation;
const int64_t num_scatter_dims = updates_rank - update_window_dims.size();
permutation.reserve(updates_rank);
for (int i = 0; i < updates_rank; ++i) {
if (!absl::c_linear_search(update_window_dims, i)) {
permutation.push_back(i);
}
}
absl::c_copy(update_window_dims, std::back_inserter(permutation));
TF_ASSIGN_OR_RETURN(updates, MaybeTranspose(updates, permutation));
if (num_scatter_dims > 1) {
TF_ASSIGN_OR_RETURN(updates, CollapseFirstNDims(updates, num_scatter_dims));
} else if (num_scatter_dims == 0) {
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, {0}));
}
if (!inserted_window_dims.empty()) {
std::vector<int64_t> new_dims;
new_dims.reserve(inserted_window_dims.size());
for (int64_t i : inserted_window_dims) {
new_dims.push_back(i + 1);
}
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, new_dims));
}
return updates;
}
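// Derives the permutation for the update tensors from the permutation applied
// to the operands: dimension 0 (the collapsed scatter dimension) stays first,
// and each operand dimension is shifted up by one.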
std::vector<int64_t> MakeUpdatePermutation(
const std::vector<int64_t>& operand_permutation) {
std::vector<int64_t> update_permutation;
update_permutation.reserve(operand_permutation.size() + 1);
update_permutation.push_back(0);
for (auto& dim : operand_permutation) {
update_permutation.push_back(dim + 1);
}
return update_permutation;
}
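// Flattens and transposes all updates of `scatter` (see
// FlattenAndTransposeUpdates), then applies `update_permutation` so the
// updates line up with the transposed operands.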
absl::StatusOr<std::vector<HloInstruction*>> TransformScatterUpdates(
HloScatterInstruction* scatter,
const std::vector<int64_t>& update_permutation,
int64_t scatter_indices_size) {
std::vector<HloInstruction*> scatter_updates;
const auto& attrs = scatter->scatter_dimension_numbers();
scatter_updates.reserve(scatter->scatter_updates().size());
for (auto* update : scatter->scatter_updates()) {
TF_ASSIGN_OR_RETURN(
scatter_updates.emplace_back(),
FlattenAndTransposeUpdates(update, attrs.update_window_dims(),
attrs.inserted_window_dims(),
scatter_indices_size));
}
return MaybeTranspose(scatter_updates, update_permutation);
}
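// Builds the dimension numbers of a canonical scatter: index_vector_dim is 1,
// scatter_dims_to_operand_dims is the identity, inserted_window_dims is
// empty, and update_window_dims is {1, ..., operand_rank}.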
ScatterDimensionNumbers MakeScatterDimensionNumbers(
int64_t operand_rank, int64_t scatter_indices_vector_size) {
ScatterDimensionNumbers dim_numbers;
dim_numbers.mutable_update_window_dims()->Reserve(
static_cast<int>(operand_rank));
for (int i = 0; i < operand_rank; ++i) {
dim_numbers.add_update_window_dims(1 + i);
}
dim_numbers.mutable_scatter_dims_to_operand_dims()->Reserve(
static_cast<int>(scatter_indices_vector_size));
for (int i = 0; i < scatter_indices_vector_size; ++i) {
dim_numbers.add_scatter_dims_to_operand_dims(i);
}
dim_numbers.set_index_vector_dim(1);
return dim_numbers;
}
}  // namespace
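// Rewrites the scatter `inst` into the canonical form accepted by
// IsSimplifiedScatter by transposing operands and updates, flattening the
// scatter dimensions, and transposing the result back where needed. A sketch
// of the rewrite, adapted from the unit tests (shapes are illustrative):
//
//   scatter(operand f32[3,3], indices s32[2], update f32[2,3]),
//     update_window_dims={1}, inserted_window_dims={0},
//     scatter_dims_to_operand_dims={0}, index_vector_dim=1
//
// becomes a scatter on indices reshaped to s32[2,1] and updates reshaped to
// f32[2,1,3], with update_window_dims={1,2}, inserted_window_dims={},
// scatter_dims_to_operand_dims={0}, index_vector_dim=1.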
absl::StatusOr<HloInstruction*> ScatterSimplifier::ExpandInstruction(
HloInstruction* inst) {
auto* scatter = Cast<HloScatterInstruction>(inst);
if (scatter->called_computations().size() != 1) {
return InvalidArgumentStrCat(
"Expected scatter->called_computations() to have exactly one element, "
"got ",
scatter->called_computations().size());
}
HloComputation* called_computation = scatter->called_computations().front();
const auto& attrs = scatter->scatter_dimension_numbers();
const int operand_rank =
attrs.update_window_dims().size() + attrs.inserted_window_dims().size();
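  // Degenerate case: scatter into a scalar. The result is simply the scatter
  // computation applied to the operands and updates, so emit a call to it and
  // inline the call.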
if (operand_rank == 0) {
absl::InlinedVector<HloInstruction*, 2> scatter_operands_and_updates;
scatter_operands_and_updates.reserve(2 * scatter->operand_count());
absl::c_copy(scatter->scatter_operands(),
std::back_inserter(scatter_operands_and_updates));
absl::c_copy(scatter->scatter_updates(),
std::back_inserter(scatter_operands_and_updates));
auto* call_op = scatter->AddInstruction(HloInstruction::CreateCall(
scatter->shape(), scatter_operands_and_updates, called_computation));
TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(call_op));
TF_ASSIGN_OR_RETURN(auto map, CallInliner::Inline(call_op));
return map[call_op];
}
auto [operand_permutation, operand_permutation_inverse] =
MakeOperandStartIndexPermutations(attrs.scatter_dims_to_operand_dims(),
operand_rank);
auto update_permutation = MakeUpdatePermutation(operand_permutation);
TF_ASSIGN_OR_RETURN(auto* scatter_indices,
TransformStartIndices(scatter->scatter_indices(),
attrs.index_vector_dim()));
TF_ASSIGN_OR_RETURN(
auto scatter_updates,
TransformScatterUpdates(scatter, update_permutation,
scatter_indices->shape().dimensions(0)));
TF_ASSIGN_OR_RETURN(
auto scatter_operands,
MaybeTranspose(scatter->scatter_operands(), operand_permutation));
auto dim_numbers = MakeScatterDimensionNumbers(
operand_rank, attrs.scatter_dims_to_operand_dims().size());
Shape output_shape;
if (scatter_operands.size() == 1) {
output_shape = scatter_operands.front()->shape();
} else {
std::vector<Shape> shapes;
shapes.reserve(scatter_operands.size());
for (auto* operand : scatter_operands) {
shapes.push_back(operand->shape());
}
output_shape = ShapeUtil::MakeTupleShape(shapes);
}
auto* result = scatter->AddInstruction(HloInstruction::CreateScatter(
output_shape, scatter_operands, scatter_indices, scatter_updates,
called_computation, dim_numbers,
scatter->indices_are_sorted(), scatter->unique_indices()));
if (IsIdentityPermutation(operand_permutation)) {
return result;
}
if (scatter->scatter_operands().size() == 1) {
return MaybeTranspose(result, operand_permutation_inverse);
}
std::vector<HloInstruction*> result_items;
result_items.reserve(scatter->scatter_operands().size());
for (int i = 0; i < scatter->scatter_operands().size(); ++i) {
TF_ASSIGN_OR_RETURN(result_items.emplace_back(),
MakeGetTupleElementHlo(result, i));
TF_ASSIGN_OR_RETURN(
result_items.back(),
MaybeTranspose(result_items.back(), operand_permutation_inverse));
}
return MaybeMakeTuple(result_items);
}
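// Returns true if the scatter is already in the canonical form produced by
// this pass: the index vector dimension is the last dimension of the scatter
// indices, the updates have exactly one (leading) scatter dimension,
// scatter_dims_to_operand_dims is the identity permutation, the update window
// dimensions are sorted and do not include dimension 0, and there are no
// inserted window dimensions. Scatters into scalars are never considered
// simplified, because they are expanded into a call instead.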
bool ScatterSimplifier::IsSimplifiedScatter(
const HloScatterInstruction* scatter) {
const auto& dims = scatter->scatter_dimension_numbers();
auto operand_rank = scatter->scatter_operands().front()->shape().rank();
if (operand_rank == 0) return false;
bool nonstandard_index_vector_dim =
dims.index_vector_dim() != scatter->scatter_indices()->shape().rank() - 1;
int64_t num_scatter_dims =
scatter->scatter_updates().front()->shape().rank() -
dims.update_window_dims().size();
bool scatter_indices_reordered =
!IsIdentityPermutation(dims.scatter_dims_to_operand_dims());
bool scatter_dim_not_first =
absl::c_linear_search(dims.update_window_dims(), 0);
bool update_window_dims_sorted = absl::c_is_sorted(dims.update_window_dims());
return !(nonstandard_index_vector_dim || num_scatter_dims > 1 ||
scatter_indices_reordered || scatter_dim_not_first ||
!update_window_dims_sorted || !dims.inserted_window_dims().empty());
}
bool ScatterSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return scatter && !IsSimplifiedScatter(scatter);
}
} | #include "xla/service/scatter_simplifier.h"
#include <optional>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ScatterSimplifierTest : public HloTestBase {};
TEST_F(ScatterSimplifierTest, InsertsIndexVectorAndWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = f32[3,3] parameter(0)
operand1 = f32[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = f32[2,3] parameter(3)
update1 = f32[2,3] parameter(4)
ROOT scatter = (f32[3,3], f32[3,3]) scatter(operand0, operand1, indices,
update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[SCATTER_DIMS_WITH_VECTOR:.*]] = s32[2,1]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES0:.*]] = f32[2,1,3]{2,1,0} reshape(%update0)
CHECK: %[[RESHAPED_UPDATES1:.*]] = f32[2,1,3]{2,1,0} reshape(%update1)
CHECK: ROOT %scatter = (f32[3,3]{1,0}, f32[3,3]{1,0}) scatter(
CHECK-SAME: %operand0, %operand1, %[[SCATTER_DIMS_WITH_VECTOR]],
CHECK-SAME: %[[RESHAPED_UPDATES0]], %[[RESHAPED_UPDATES1]]),
CHECK-SAME: update_window_dims={1,2},
CHECK-SAME: inserted_window_dims={},
CHECK-SAME: scatter_dims_to_operand_dims={0},
CHECK-SAME: index_vector_dim=1,
CHECK-SAME: to_apply=%scatter_computation
)");
}
TEST_F(ScatterSimplifierTest, CollapsesScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=2
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[RESHAPED_INDICES:.*]] = s32[2,2]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES:.*]] = f32[2,1,3]{2,1,0} reshape(%update)
CHECK: scatter(
CHECK-SAME: %[[RESHAPED_INDICES]]
CHECK-SAME: %[[RESHAPED_UPDATES]]
)");
}
TEST_F(ScatterSimplifierTest, NoOpForSimpleScatter) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), std::nullopt);
}
TEST_F(ScatterSimplifierTest, MovesIndexVectorDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1] parameter(1)
update = f32[1,3,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_INDICES:.*]] = s32[1,2]{1,0}
CHECK-SAME: transpose(%indices), dimensions={1,0}
CHECK: scatter(%operand, %[[TRANSPOSED_INDICES]], %update),
CHECK-SAME: index_vector_dim=1
)");
}
TEST_F(ScatterSimplifierTest, TransformsUpdatesAndOperandUsingScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,4,5] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,4,5] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={2,0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[T_OPERAND:.*]] = f32[5,3,4]{2,1,0} transpose(%operand),
CHECK-SAME: dimensions={2,0,1}
CHECK: %[[T_UPDATES:.*]] = f32[2,3,1,1]{3,2,1,0} transpose(%update),
CHECK-SAME: dimensions={0,3,1,2}
CHECK: %[[SCATTER:.*]] = {{.*}} scatter(
CHECK-SAME: %[[T_OPERAND]], %indices, %[[T_UPDATES]])
CHECK-SAME: scatter_dims_to_operand_dims={0,1},
CHECK: ROOT %{{.*}} = f32[3,4,5]
CHECK-SAME: transpose(%[[SCATTER]]), dimensions={1,2,0}
)");
}
TEST_F(ScatterSimplifierTest, MakesScatterDimensionsLeadingInUpdates) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[2,1] parameter(2)
ROOT scatter = f32[3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={0},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_UPDATES:.*]] = f32[1,2]{1,0}
CHECK-SAME: transpose(%update), dimensions={1,0}
CHECK: scatter(
CHECK-SAME: %[[TRANSPOSED_UPDATES]]
CHECK-SAME: update_window_dims={1},
)");
}
TEST_F(ScatterSimplifierTest, ZeroDimScatterIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[4,4] parameter(0)
indices = s32[2] parameter(1)
update = f32[3,3] parameter(2)
ROOT scatter = f32[4,4]{1,0} scatter(operand, indices, update),
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
to_apply=scatter_computation
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: scatter(
)");
}
TEST_F(ScatterSimplifierTest,
IsSimplifiedScatterReturnsFalseForUnsortedWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,2] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[1,2,2] parameter(2)
ROOT scatter = f32[3,2] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
auto scatter = module->entry_computation()->root_instruction();
EXPECT_FALSE(ScatterSimplifier::IsSimplifiedScatter(
Cast<HloScatterInstruction>(scatter)));
}
TEST_F(ScatterSimplifierTest, ScatterIntoScalar) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY kernel_entry {
operand = s32[] parameter(0)
indices = s32[0]{0} parameter(1)
updates = s32[] parameter(2)
ROOT scatter = s32[] scatter(operand, indices, updates),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=scatter_computation
}
)";
auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: ENTRY
CHECK: %[[OPERAND:.*]] = s32[] parameter(0)
CHECK: %[[UPDATES:.*]] = s32[] parameter(2)
CHECK: ROOT %{{.*}} = s32[] add(%[[OPERAND]], %[[UPDATES]])
)");
}
TEST_F(ScatterSimplifierTest, VariadicScatterIntoScalar) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
p0 = f32[] parameter(0)
p1 = bf16[] parameter(1)
p2 = f32[] parameter(2)
p3 = bf16[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = f32[] parameter(0)
operand1 = bf16[] parameter(1)
indices = s32[0]{0} parameter(2)
updates0 = f32[] parameter(3)
updates1 = bf16[] parameter(4)
ROOT scatter = (f32[], bf16[]) scatter(operand0, operand1, indices, updates0, updates1),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=scatter_computation
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: ENTRY
CHECK: %[[UPDATES0:.*]] = f32[] parameter(3)
CHECK: %[[UPDATES1:.*]] = bf16[] parameter(4)
CHECK: ROOT %{{.*}} = (f32[], bf16[]) tuple(%[[UPDATES0]], %[[UPDATES1]])
)");
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09730dec-80af-493b-9079-4dd8922a3bdf | cpp | tensorflow/tensorflow | slice_sinker | third_party/xla/xla/service/slice_sinker.cc | third_party/xla/xla/service/slice_sinker_test.cc | #include "xla/service/slice_sinker.h"
#include <algorithm>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
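// Returns true if the two slices have identical start indices, limit indices
// and strides. Both slices must operate on sources of the same dimensions.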
bool SameSliceConfiguration(const HloInstruction* slice_1,
const HloInstruction* slice_2) {
CHECK_EQ(slice_1->opcode(), HloOpcode::kSlice);
CHECK_EQ(slice_2->opcode(), HloOpcode::kSlice);
CHECK(slice_1->operand(0)->shape().dimensions() ==
slice_2->operand(0)->shape().dimensions());
return slice_1->slice_starts() == slice_2->slice_starts() &&
slice_1->slice_limits() == slice_2->slice_limits() &&
slice_1->slice_strides() == slice_2->slice_strides();
}
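// Returns true if every operand of the elementwise instruction `inst` is a
// slice, all slices share the same slice configuration, and their sources
// have compatible shapes (ignoring element type).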
bool IsElementwiseOperationOnSimilarSlices(const HloInstruction* inst) {
CHECK(inst->IsElementwise());
if (absl::c_any_of(inst->operands(), [](const HloInstruction* operand) {
return operand->opcode() != HloOpcode::kSlice;
})) {
return false;
}
const HloInstruction* slice0 = inst->operand(0);
return absl::c_all_of(absl::MakeSpan(inst->operands()).subspan(1),
[slice0](const HloInstruction* slice) {
return ShapeUtil::CompatibleIgnoringElementType(
slice0->operand(0)->shape(),
slice->operand(0)->shape()) &&
SameSliceConfiguration(slice0, slice);
});
}
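// Returns true if `candidate` performs the same operation, with the same
// result element type, as `operation_on_slices`, and each of its operands is
// a slice of the corresponding slice source of `operation_on_slices` with the
// same slice configuration. Candidates without users are rejected.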
bool IsSimilarOperationOnSlices(const HloInstruction* operation_on_slices,
const HloInstruction* candidate) {
if (candidate->user_count() == 0) {
return false;
}
if (!candidate->SameOp(*operation_on_slices) ||
operation_on_slices->shape().element_type() !=
candidate->shape().element_type()) {
return false;
}
const HloInstruction* operand_slice0 = candidate->operand(0);
for (int64_t i = 0; i < candidate->operand_count(); ++i) {
const HloInstruction* operand_slice = candidate->operand(i);
if (operand_slice->opcode() != HloOpcode::kSlice ||
operand_slice->operand(0) !=
operation_on_slices->operand(i)->operand(0) ||
!SameSliceConfiguration(operand_slice0, operand_slice)) {
return false;
}
}
return true;
}
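// Profitability heuristic: the transformation pays off only if the operations
// in the group together produce at least as many elements as one operation on
// the whole slice source would, i.e.
//   sum_i elements(user_i->shape()) >= elements(slice_source->shape()).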
bool ShouldTransform(const std::vector<HloInstruction*>& operations_on_slices) {
int64_t sum = 0;
for (HloInstruction* user : operations_on_slices) {
sum += ShapeUtil::ElementsIn(user->shape());
}
return sum >= xla::ShapeUtil::ElementsIn(
operations_on_slices[0]->operand(0)->operand(0)->shape());
}
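// Collects the group of operations similar to `operation_on_slices` (see
// IsSimilarOperationOnSlices) by walking the users of the slices taken from
// its first slice source. Returns std::nullopt if the transformation is not
// expected to be profitable.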
std::optional<std::vector<HloInstruction*>> FindElementwiseOperationGroup(
const HloInstruction* operation_on_slices) {
std::vector<HloInstruction*> operations;
const HloInstruction* slice_source0 =
operation_on_slices->operand(0)->operand(0);
for (const HloInstruction* operand_slice0 : slice_source0->users()) {
if (operand_slice0->opcode() != HloOpcode::kSlice) {
continue;
}
for (HloInstruction* user : operand_slice0->users()) {
if (IsSimilarOperationOnSlices(operation_on_slices, user)) {
operations.push_back(user);
}
}
}
return ShouldTransform(operations) ? std::make_optional(operations)
: std::nullopt;
}
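// Replaces each operation on slices in the group with a slice of a single
// operation applied to the whole slice sources. A sketch, following the unit
// tests:
//
//   before:  add0 = add(slice(p0, [0:2]), slice(p1, [0:2]))
//            add1 = add(slice(p0, [2:8]), slice(p1, [2:8]))
//   after:   sum = add(p0, p1)
//            add0 -> slice(sum, [0:2]), add1 -> slice(sum, [2:8])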
absl::Status SinkSlices(
const std::vector<HloInstruction*>& slice_sources,
const std::vector<HloInstruction*>& operation_on_slices) {
const Shape shape = slice_sources[0]->shape();
PrimitiveType element_type = operation_on_slices[0]->shape().element_type();
Shape new_shape = ShapeUtil::ChangeElementType(shape, element_type);
HloComputation* computation = operation_on_slices[0]->parent();
auto operation_on_slice_sources = computation->AddInstruction(
operation_on_slices[0]->CloneWithNewOperands(new_shape, slice_sources));
VLOG(10) << "Adding operation_on_slice_sources: "
<< operation_on_slice_sources->ToString();
for (HloInstruction* user : operation_on_slices) {
const HloInstruction* operand_slice = user->operand(0);
auto user_slice =
computation->AddInstruction(operand_slice->CloneWithNewOperands(
user->shape(), {operation_on_slice_sources}));
VLOG(10) << "Adding new slice: " << user_slice->ToString()
<< " to replace: " << user->ToString();
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(user_slice));
}
return absl::OkStatus();
}
}  // namespace
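// For each elementwise operation whose operands are all similar slices of the
// same sources, finds the group of similar operations and, if profitable,
// sinks the slices below a single operation on the unsliced sources.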
absl::StatusOr<bool> SliceSinker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (!instruction->IsElementwise() || instruction->operand_count() == 0 ||
instruction->user_count() == 0) {
continue;
}
VLOG(10) << "Processing instruction : " << instruction->ToString();
if (!IsElementwiseOperationOnSimilarSlices(instruction)) {
continue;
}
std::optional<std::vector<HloInstruction*>> similar_operations =
FindElementwiseOperationGroup(instruction);
if (!similar_operations.has_value()) {
continue;
}
std::vector<HloInstruction*> slice_sources;
absl::c_transform(
instruction->operands(), std::back_inserter(slice_sources),
[](HloInstruction* slice) { return slice->mutable_operand(0); });
TF_RETURN_IF_ERROR(SinkSlices(slice_sources, similar_operations.value()));
changed = true;
}
}
return changed;
}
} | #include "xla/service/slice_sinker.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
namespace m = match;
using ::testing::ElementsAre;
class SliceSinkerTest : public HloTestBase {};
TEST_F(SliceSinkerTest, TernaryOperation) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = pred[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
p2 = f32[8,9] parameter(2)
s00 = pred[2,9] slice(pred[8,9] p0), slice={[0:2], [0:9]}
s01 = pred[6,9] slice(pred[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s20 = f32[2,9] slice(f32[8,9] p2), slice={[0:2], [0:9]}
s21 = f32[6,9] slice(f32[8,9] p2), slice={[2:8], [0:9]}
sel0 = f32[2,9] select(pred[2,9] s00, f32[2,9] s10, f32[2,9] s20)
sel1 = f32[6,9] select(pred[6,9] s01, f32[6,9] s11, f32[6,9] s21)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(sel0, sel1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))),
m::Slice(&slice1, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingPartialSlicesBeneficial) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[8,4] slice(f32[8,9] p0), slice={[0:8], [0:4]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[8,4] slice(f32[8,9] p1), slice={[0:8], [0:4]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[8,4] add(f32[8,4] s02, f32[8,4] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[8,4]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 4));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, SameSliceSourcesTwoPeerGroups) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s02 = f32[8,2] slice(f32[8,9] p0), slice={[0:8], [0:2]}
s03 = f32[8,7] slice(f32[8,9] p0), slice={[0:8], [2:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s12 = f32[8,2] slice(f32[8,9] p1), slice={[0:8], [0:2]}
s13 = f32[8,7] slice(f32[8,9] p1), slice={[0:8], [2:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[8,2] multiply(f32[8,2] s02, f32[8,2] s12)
mul1 = f32[8,7] multiply(f32[8,7] s03, f32[8,7] s13)
ROOT tuple = (f32[2,9], f32[6,9], f32[8,2], f32[8,7]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 2));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(0, 2));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingMultipleSlices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[3,9] slice(f32[8,9] p0), slice={[2:5], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[3,9] slice(f32[8,9] p1), slice={[2:5], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[3,9] add(f32[3,9] s02, f32[3,9] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[3,9]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(5, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, DisjointedPartialSlices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[2:7], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[2:7], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
ROOT tuple = (f32[2,9], f32[5,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, OverlappingPartialSlicesNotBeneficial) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,7] slice(f32[8,9] p0), slice={[0:2], [0:7]}
s01 = f32[6,7] slice(f32[8,9] p0), slice={[2:8], [0:7]}
s10 = f32[2,7] slice(f32[8,9] p1), slice={[0:2], [0:7]}
s11 = f32[6,7] slice(f32[8,9] p1), slice={[2:8], [0:7]}
add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
add1 = f32[6,7] add(f32[6,7] s01, f32[6,7] s11)
ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOrderingOfSliceSources) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,7] parameter(0)
p1 = f32[8,7] parameter(1)
s00 = f32[2,7] slice(f32[8,7] p0), slice={[0:2], [0:7]}
s01 = f32[6,7] slice(f32[8,7] p0), slice={[2:8], [0:7]}
s10 = f32[2,7] slice(f32[8,7] p1), slice={[0:2], [0:7]}
s11 = f32[6,7] slice(f32[8,7] p1), slice={[2:8], [0:7]}
add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
add1 = f32[6,7] add(f32[6,7] s11, f32[6,7] s01)
ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesFromDifferentIndices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:4], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[4:8], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:4], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[4:8], [0:9]}
add0 = f32[4,9] add(f32[4,9] s01, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s00, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOperator) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
mul = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
add = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(mul, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SameOperatorDifferentAttributes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
cmp1 = pred[2,9] compare(f32[2,9] s00, f32[2,9] s10), direction=GT
cmp2 = pred[6,9] compare(f32[6,9] s01, f32[6,9] s11), direction=LT
ROOT tuple = (pred[2,9], pred[6,9]) tuple(cmp1, cmp2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithMultiUsers) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
mul1 = f32[6,9] multiply(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9], f32[2,9], f32[6,9]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, NonElementWise) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8] parameter(0)
s00 = f32[2] slice(f32[8] p0), slice={[0:2]}
s01 = f32[6] slice(f32[8] p0), slice={[2:8]}
bc0 = f32[2,9] broadcast(f32[2] s00), dimensions={0}
bc1 = f32[6,9] broadcast(f32[6] s01), dimensions={0}
ROOT tuple = (f32[2,9], f32[6,9]) tuple(bc0, bc1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithNontrivialStrides) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:7:2], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[1:8:2], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:7:2], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[1:8:2], [0:9]}
add0 = f32[4,9] add(f32[4,9] s00, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s01, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(7, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(2, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(1, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(2, 1));
}
TEST_F(SliceSinkerTest, NotAllSliceOperand) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[2,9] parameter(1)
p2 = f32[6,9] parameter(2)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] p1)
abs1 = f32[6,9] abs(f32[6,9] p2)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, Cascade) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] s10)
abs1 = f32[6,9] abs(f32[6,9] s11)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))),
m::Slice(&slice1,
m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, SameOpcodeDifferentResultElementTypes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
convert0 = s32[2,9] convert(f32[2,9] s00)
convert1 = s64[6,9] convert(f32[6,9] s01)
ROOT tuple = (s32[2,9], s64[6,9]) tuple(convert0, convert1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d74cae6-8e08-4595-b6bf-2d5ca27cc59f | cpp | tensorflow/tensorflow | convert_mover | third_party/xla/xla/service/convert_mover.cc | third_party/xla/xla/service/convert_mover_test.cc | #include "xla/service/convert_mover.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
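// Returns true if converting `literal` to `dst_ty` and back to its original
// element type reproduces the literal exactly.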
static bool IsLosslesslyConvertibleTo(const Literal& literal,
PrimitiveType dst_ty) {
PrimitiveType orig_ty = literal.shape().element_type();
absl::StatusOr<Literal> converted1 = literal.Convert(dst_ty);
if (!converted1.ok()) {
return false;
}
absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
if (!converted2.ok()) {
return false;
}
return literal == *converted2;
}
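// Data-movement ops that commute with convert: applying the op before or
// after the convert produces the same values.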
bool OpCommutesWithConvert(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kConcatenate:
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
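// Moves converts across ops that commute with them, in two phases (the
// examples below are pseudo-HLO sketches matching the unit tests):
//
//  1. Sink widening converts (e.g. f16 -> f32) below the op, so the op runs
//     in the narrower type:
//       concat(convert_f32(x:f16), convert_f32(y:f16))
//         => convert_f32(concat(x, y))
//     Constant operands are allowed as long as they are losslessly
//     convertible to the narrower type.
//
//  2. Hoist narrowing converts (e.g. f32 -> f16) above the op:
//       convert_f16(reshape(x:f32)) => reshape(convert_f16(x))
//
// Sub-byte non-pred element types are skipped in both phases.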
absl::StatusOr<bool> MoveConvertPrecisionOps(HloComputation* comp) {
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (!OpCommutesWithConvert(instr->opcode()) ||
instr->operand_count() == 0 ||
!absl::c_all_of(instr->operands(), [](const HloInstruction* operand) {
return (operand->opcode() == HloOpcode::kConvert &&
operand->user_count() == 1) ||
operand->opcode() == HloOpcode::kConstant;
})) {
continue;
}
auto convert_op_it = absl::c_find_if(instr->operands(),
HloPredicateIsOp<HloOpcode::kConvert>);
if (convert_op_it == instr->operands().end()) {
continue;
}
const HloInstruction* convert_op = *convert_op_it;
if (!absl::c_all_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() != HloOpcode::kConvert ||
operand->operand(0)->shape().element_type() ==
convert_op->operand(0)->shape().element_type();
})) {
continue;
}
PrimitiveType src_ty = convert_op->operand(0)->shape().element_type();
PrimitiveType dst_ty = convert_op->shape().element_type();
if (primitive_util::BitWidth(src_ty) >= primitive_util::BitWidth(dst_ty)) {
continue;
}
if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kConstant &&
!IsLosslesslyConvertibleTo(operand->literal(), src_ty);
})) {
continue;
}
if (primitive_util::IsSubByteNonPredType(src_ty)) {
continue;
}
VLOG(2) << "Moving increase-precision convert op " << convert_op->ToString()
<< " down the graph: " << instr->ToString();
absl::InlinedVector<HloInstruction*, 8> new_operands;
new_operands.reserve(instr->operand_count());
for (HloInstruction* operand : instr->operands()) {
switch (operand->opcode()) {
case HloOpcode::kConvert:
new_operands.push_back(operand->mutable_operand(0));
break;
case HloOpcode::kConstant:
new_operands.push_back(MakeConvertToHlo(operand, src_ty));
break;
default:
LOG(FATAL) << "Unexpected opcode in " << operand->ToString();
}
}
Shape new_shape = instr->shape();
new_shape.set_element_type(src_ty);
HloInstruction* new_instr = comp->AddInstruction(
instr->CloneWithNewOperands(new_shape, new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConvert(instr->shape(), new_instr)));
changed = true;
}
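  // Phase 2: walk the computation from roots to leaves, hoisting narrowing
  // converts above the op that feeds them. Newly created converts are pushed
  // onto the front of the queue so they can be hoisted further.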
std::deque<HloInstruction*> work_queue;
std::vector<HloInstruction*> instrs = comp->MakeInstructionPostOrder();
work_queue.insert(work_queue.end(), instrs.rbegin(), instrs.rend());
while (!work_queue.empty()) {
HloInstruction* instr = work_queue.front();
work_queue.pop_front();
if (instr->opcode() != HloOpcode::kConvert ||
instr->operand(0)->user_count() != 1 ||
!OpCommutesWithConvert(instr->operand(0)->opcode())) {
continue;
}
PrimitiveType src_ty = instr->operand(0)->shape().element_type();
PrimitiveType dst_ty = instr->shape().element_type();
if (primitive_util::BitWidth(src_ty) <= primitive_util::BitWidth(dst_ty)) {
continue;
}
if (primitive_util::IsSubByteNonPredType(dst_ty)) {
continue;
}
VLOG(2) << "Moving decrease-precision convert up the graph: "
<< instr->ToString();
HloInstruction* to_convert = instr->mutable_operand(0);
absl::InlinedVector<HloInstruction*, 8> new_operands;
new_operands.reserve(to_convert->operand_count());
for (HloInstruction* operand : to_convert->operands()) {
work_queue.push_front(MakeConvertToHlo(operand, dst_ty));
new_operands.push_back(work_queue.front());
}
Shape new_shape = to_convert->shape();
new_shape.set_element_type(dst_ty);
TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
instr, to_convert->CloneWithNewOperands(new_shape, new_operands)));
changed = true;
}
return changed;
}
}  // namespace
absl::StatusOr<bool> ConvertMover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool changed_computation,
MoveConvertPrecisionOps(comp));
changed |= changed_computation;
}
return changed;
}
} | #include "xla/service/convert_mover.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class ConvertMoverTest : public HloTestBase {
public:
ConvertMoverTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
};
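// Convenience matchers: match a convert whose result has the given element
// type.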
template <typename T>
auto MatchConvertToS8(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(S8));
}
template <typename T>
auto MatchConvertToF16(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(F16));
}
template <typename T>
auto MatchConvertToF32(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(F32));
}
template <typename T>
auto MatchConvertToC64(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(C64));
}
TEST_F(ConvertMoverTest, MoveDownThroughConcat) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(f16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(
m::Concatenate(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(ConvertMoverTest, NoMoveDownThroughConcatWithDifferentSrcTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(bf16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ConvertMoverTest, MoveUpReshape) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = f16[10,10] convert(f32[10,10] reshape(f32[100] parameter(0)))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(MatchConvertToF16(m::Parameter(0)))));
}
TEST_F(ConvertMoverTest, MoveUpTwoTransposes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
t1 = transpose(f32[3,4] parameter(0)), dimensions={1,0}
t2 = transpose(t1), dimensions={1,0}
ROOT root = f16[3,4] convert(t2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Transpose(MatchConvertToF16(m::Parameter(0))))));
}
TEST_F(ConvertMoverTest, MoveDownTwoSlices) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
slice1 = f32[9] slice(f32[10] convert(f16[10] parameter(0))), slice={[0:9]}
ROOT slice2 = f32[8] slice(slice1), slice={[0:8]}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(m::Slice(m::Slice(m::Parameter(0))))));
}
TEST_F(ConvertMoverTest, MoveDownC64) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[10] convert(f32[10] parameter(0)),
c64[10] convert(f32[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToC64(m::Concatenate(
m::Parameter(0),
m::Parameter(1)
))));
}
TEST_F(ConvertMoverTest, MoveDownC64Constant) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[2] convert(f32[2] parameter(0)),
c64[2] convert(f32[2] parameter(1)),
c64[2] constant({(1,1), (-1,-1)})),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ConvertMoverTest, MoveUpPad) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = f32[10] pad(f32[8] parameter(0), f32[] constant(0)), padding=1_1
ROOT root = f16[10] convert(pad)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(MatchConvertToF16(m::Parameter(0)),
MatchConvertToF16(m::ConstantEffectiveScalar(0)))));
}
TEST_F(ConvertMoverTest, MoveUpPadWithOutOfRangeConstant) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = s32[10] pad(s32[8] parameter(0), s32[] constant(1000)), padding=1_1
ROOT root = s8[10] convert(pad)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(MatchConvertToS8(m::Parameter(0)),
MatchConvertToS8(m::ConstantEffectiveScalar(1000)))));
}
TEST_F(ConvertMoverTest, MoveDownPad) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(0)),
padding=1_1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(m::Pad(
m::Parameter(0), MatchConvertToF16(m::ConstantEffectiveScalar(0))))));
}
TEST_F(ConvertMoverTest, NoMoveDownPadBecauseConstantIsOutOfRange) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(1e9)),
padding=1_1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b440a48-9e51-478c-ae66-5556f1655335 | cpp | tensorflow/tensorflow | hlo_cost_analysis | third_party/xla/xla/service/hlo_cost_analysis.cc | third_party/xla/xla/service/hlo_cost_analysis_test.cc | #include "xla/service/hlo_cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/errors.h"
namespace xla {
HloCostAnalysis::HloCostAnalysis(const Options& options) : options_(options) {}
HloCostAnalysis::HloCostAnalysis(ShapeSizeFunction shape_size,
const Properties& per_second_rates,
const Properties& min_latencies_seconds)
: HloCostAnalysis(
Options{shape_size, per_second_rates, min_latencies_seconds}) {}
absl::Status HloCostAnalysis::Preprocess(const HloInstruction* hlo) {
current_properties_ = Properties();
current_should_compute_bottleneck_time_ = true;
float bytes_accessed = GetShapeSize(hlo->shape());
current_properties_.set_output_bytes_accessed(GetShapeSize(hlo->shape()));
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
const HloInstruction* operand = hlo->operand(i);
bytes_accessed += GetShapeSize(operand->shape());
current_properties_.set_operand_bytes_accessed(
i, GetShapeSize(operand->shape()));
current_properties_.set_operand_utilization(i, 1.0);
}
current_properties_[kBytesAccessedKey] = bytes_accessed;
return absl::OkStatus();
}
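// Derives the instruction's optimal execution time as the bottleneck across
// resources: for each property with a configured per-second rate, the time is
// max(value / rate, min_latency), and kOptimalSecondsKey takes the maximum
// over all such properties. Also folds the instruction's properties into the
// running totals.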
absl::Status HloCostAnalysis::Postprocess(const HloInstruction* hlo) {
if (current_should_compute_bottleneck_time_) {
float optimal_seconds = 0.0f;
current_properties_.ForEach([&](absl::string_view key, float val) {
if (key == kOptimalSecondsKey) {
return;
}
float per_second_rate = options_.per_second_rate(key);
if (per_second_rate != 0) {
float time_for_key =
std::max(val / per_second_rate, options_.min_latency_seconds(key));
optimal_seconds = std::max(optimal_seconds, time_for_key);
}
});
current_properties_[kOptimalSecondsKey] = optimal_seconds;
}
current_properties_.ForEach(
[&](absl::string_view key, float val) { properties_sum_[key] += val; });
auto [it_ignored, inserted] =
hlo_properties_.emplace(hlo, std::move(current_properties_));
current_properties_ = Properties();
TF_RET_CHECK(inserted);
return absl::OkStatus();
}
absl::Status HloCostAnalysis::RemoveInstruction(HloInstruction* instruction) {
auto it = hlo_properties_.find(instruction);
if (it != hlo_properties_.end()) {
current_properties_ = it->second;
current_properties_.ForEach(
[&](absl::string_view key, float val) { properties_sum_[key] -= val; });
hlo_properties_.erase(instruction);
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::RevisitInstruction(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(RemoveInstruction(instruction));
TF_RETURN_IF_ERROR(Preprocess(instruction));
TF_RETURN_IF_ERROR(instruction->Visit(this));
TF_RETURN_IF_ERROR(Postprocess(instruction));
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleElementwiseOp(
const HloInstruction* hlo_instruction) {
const auto& shape = hlo_instruction->shape();
auto computation_count = ShapeUtil::ElementsIn(shape);
auto opcode = hlo_instruction->opcode();
if (opcode == HloOpcode::kErf || opcode == HloOpcode::kExp ||
opcode == HloOpcode::kLog || opcode == HloOpcode::kLogistic ||
opcode == HloOpcode::kPower || opcode == HloOpcode::kSqrt ||
opcode == HloOpcode::kCbrt || opcode == HloOpcode::kRsqrt ||
opcode == HloOpcode::kTanh || opcode == HloOpcode::kSin ||
opcode == HloOpcode::kCos || opcode == HloOpcode::kExpm1 ||
opcode == HloOpcode::kLog1p || opcode == HloOpcode::kAtan2 ||
opcode == HloOpcode::kTan) {
current_properties_[kTranscendentalsKey] = computation_count;
} else {
current_properties_[kFlopsKey] = computation_count;
}
return absl::OkStatus();
}
float HloCostAnalysis::GetPropertyForHlo(
const HloInstruction& hlo, absl::string_view key,
const HloToProperties& hlo_to_properties) {
auto it = hlo_to_properties.find(&hlo);
if (it == hlo_to_properties.end()) {
return 0.0f;
}
return it->second[key];
}
int64_t HloCostAnalysis::GetShapeSize(const Shape& shape) const {
if (!LayoutUtil::HasLayout(shape)) {
return 0;
}
if (LayoutUtil::IsSparseArray(shape)) {
return 0;
}
return options_.shape_size(shape);
}
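// Estimates the bytes actually read from a fused parameter by inspecting its
// users: slice-like users (slice, dynamic-slice, dynamic-update-slice) only
// charge the sliced region, while any other "trivial" user charges the full
// parameter size at most once.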
int64_t HloCostAnalysis::FusionParameterReadBytes(
const HloInstruction* hlo) const {
CHECK(hlo->IsFused() && (hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kGetTupleElement));
auto handle_slice = [this](const HloInstruction* hlo,
const HloInstruction* user) -> int64_t {
return GetShapeSize(user->shape());
};
auto handle_dynamic_slice = [this](const HloInstruction* hlo,
const HloInstruction* user,
bool& seen_trivial_user) -> int64_t {
if (hlo == user->operand(0)) {
return GetShapeSize(user->shape());
}
if (!seen_trivial_user) {
seen_trivial_user = true;
return GetShapeSize(hlo->shape());
}
return 0;
};
auto handle_dynamic_update_slice =
[this](const HloInstruction* hlo, const HloInstruction* user,
bool& seen_trivial_user) -> int64_t {
if (hlo != user->operand(0) && !seen_trivial_user) {
seen_trivial_user = true;
return GetShapeSize(hlo->shape());
}
return 0;
};
int64_t size = 0;
bool seen_trivial_user = false;
for (const HloInstruction* user : hlo->users()) {
switch (user->opcode()) {
case HloOpcode::kFusion: {
for (int64_t idx : user->OperandIndices(hlo)) {
bool nested_seen_trivial_user = false;
const auto& fusion_users = user->users();
const HloInstruction* root_instruction =
user->fused_instructions_computation()->root_instruction();
const bool fusion_is_simple =
user->fused_parameter(idx) == root_instruction->operand(0);
for (const HloInstruction* fusion_user : fusion_users) {
if (fusion_is_simple &&
fusion_user->opcode() == HloOpcode::kSlice) {
size += handle_slice(user, fusion_user);
} else if (fusion_is_simple &&
fusion_user->opcode() == HloOpcode::kDynamicSlice) {
size += handle_dynamic_slice(user, fusion_user,
nested_seen_trivial_user);
} else if (fusion_is_simple && fusion_user->opcode() ==
HloOpcode::kDynamicUpdateSlice) {
size += handle_dynamic_update_slice(user, fusion_user,
nested_seen_trivial_user);
} else if (!nested_seen_trivial_user) {
nested_seen_trivial_user = true;
size += FusionParameterReadBytes(user->fused_parameter(idx));
}
}
}
break;
}
case HloOpcode::kSlice:
size += handle_slice(hlo, user);
break;
case HloOpcode::kDynamicSlice:
size += handle_dynamic_slice(hlo, user, seen_trivial_user);
break;
case HloOpcode::kDynamicUpdateSlice:
size += handle_dynamic_update_slice(hlo, user, seen_trivial_user);
break;
case HloOpcode::kBroadcast:
case HloOpcode::kReshape:
size += GetShapeSize(hlo->shape());
break;
default:
if (!seen_trivial_user) {
seen_trivial_user = true;
size += GetShapeSize(hlo->shape());
}
}
}
return size;
}
absl::Status HloCostAnalysis::FusionCalculateUtilizations(
const HloInstruction* fusion) {
for (const HloInstruction* instr :
fusion->fused_instructions_computation()->instructions()) {
if (ShouldFilterFusionInstruction(fusion, instr)) {
hlo_properties_[instr][kUtilizationKey] = 0.f;
} else {
hlo_properties_[instr][kUtilizationKey] = 1.f;
}
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleElementwiseUnary(
const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
absl::Status HloCostAnalysis::HandleElementwiseBinary(
const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
absl::Status HloCostAnalysis::HandleCompare(const HloInstruction* compare) {
return HandleElementwiseOp(compare);
}
absl::Status HloCostAnalysis::HandleClamp(const HloInstruction* clamp) {
return HandleElementwiseOp(clamp);
}
absl::Status HloCostAnalysis::HandleReducePrecision(const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
absl::Status HloCostAnalysis::HandleParameter(const HloInstruction*) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleConstant(const HloInstruction*) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleIota(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleGetTupleElement(
const HloInstruction* get_tuple_element) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
current_properties_.set_operand_bytes_accessed(0, 0);
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleSelect(const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
absl::Status HloCostAnalysis::HandleReverse(const HloInstruction*) {
return absl::OkStatus();
}
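// A slice reads exactly as many bytes as it writes, so both operand and
// output bytes accessed equal the output shape size, and operand utilization
// is the ratio of output to input element counts.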
absl::Status HloCostAnalysis::HandleSlice(const HloInstruction* slice) {
const int64_t output_shape_size = GetShapeSize(slice->shape());
const int64_t num_input_elements =
ShapeUtil::ElementsIn(slice->operand(0)->shape());
const int64_t num_output_elements = ShapeUtil::ElementsIn(slice->shape());
current_properties_[kBytesAccessedKey] = output_shape_size * 2;
current_properties_.set_output_bytes_accessed(output_shape_size);
current_properties_.set_operand_bytes_accessed(0, output_shape_size);
current_properties_.set_operand_utilization(
0, 1.0 * num_output_elements / num_input_elements);
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleDynamicSlice(
const HloInstruction* dynamic_slice) {
const int64_t output_shape_size = GetShapeSize(dynamic_slice->shape());
const int64_t start_indices_shape_size =
GetShapeSize(dynamic_slice->operand(1)->shape());
const int64_t num_input_elements =
ShapeUtil::ElementsIn(dynamic_slice->operand(0)->shape());
const int64_t num_output_elements =
ShapeUtil::ElementsIn(dynamic_slice->shape());
current_properties_[kBytesAccessedKey] =
output_shape_size * 2 + start_indices_shape_size;
current_properties_.set_output_bytes_accessed(output_shape_size);
current_properties_.set_operand_bytes_accessed(0, output_shape_size);
current_properties_.set_operand_bytes_accessed(1, start_indices_shape_size);
current_properties_.set_operand_utilization(
0, 1.0 * num_output_elements / num_input_elements);
return absl::OkStatus();
}
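// Dynamic-update-slice modifies operand 0 in place: only the update
// (operand 1) and start indices (operand 2) are read, and only the updated
// region is written, so operand 0 contributes no bytes accessed.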
absl::Status HloCostAnalysis::HandleDynamicUpdateSlice(
const HloInstruction* dynamic_update_slice) {
const int64_t update_shape_size =
GetShapeSize(dynamic_update_slice->operand(1)->shape());
const int64_t start_indices_shape_size =
GetShapeSize(dynamic_update_slice->operand(2)->shape());
const int64_t num_update_elements =
ShapeUtil::ElementsIn(dynamic_update_slice->operand(1)->shape());
const int64_t num_output_elements =
ShapeUtil::ElementsIn(dynamic_update_slice->shape());
current_properties_[kBytesAccessedKey] =
update_shape_size * 2 + start_indices_shape_size;
current_properties_.set_output_bytes_accessed(update_shape_size);
current_properties_.set_operand_bytes_accessed(0, 0);
current_properties_.set_operand_bytes_accessed(1, update_shape_size);
current_properties_.set_operand_bytes_accessed(2, start_indices_shape_size);
current_properties_.set_operand_utilization(
0,
1.0 * (num_output_elements - num_update_elements) / num_output_elements);
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleTuple(const HloInstruction* tuple) {
current_properties_[kBytesAccessedKey] = GetShapeSize(tuple->shape());
current_properties_.set_output_bytes_accessed(GetShapeSize(tuple->shape()));
for (int i = 0; i < tuple->operand_count(); ++i) {
current_properties_.set_operand_bytes_accessed(i, 0);
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleConcatenate(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleConvert(const HloInstruction* convert) {
return HandleElementwiseOp(convert);
}
absl::Status HloCostAnalysis::HandleCopy(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleDomain(const HloInstruction* domain) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
for (int i = 0; i < domain->operand_count(); ++i) {
current_properties_.set_operand_bytes_accessed(i, 0);
}
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
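// A dot performs one fused multiply-add (kFmaFlops flops) per result element
// for every step along the contracted dimensions, so the flop count is
// kFmaFlops * |result| * product of the LHS contracting dimension sizes.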
int64_t HloCostAnalysis::GetDotFlops(const Shape& lhs_shape,
const Shape& result_shape,
const DotDimensionNumbers& dnums) {
int64_t reduction_width = 1;
for (auto dim : dnums.lhs_contracting_dimensions()) {
reduction_width *= lhs_shape.dimensions(dim);
}
return kFmaFlops * ShapeUtil::ElementsIn(result_shape) * reduction_width;
}
absl::Status HloCostAnalysis::HandleDot(const HloInstruction* dot) {
current_properties_[kFlopsKey] = GetDotFlops(
dot->operand(0)->shape(), dot->shape(), dot->dot_dimension_numbers());
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleInfeed(const HloInstruction* infeed) {
int64_t size = 0;
ShapeUtil::ForEachLeafShape(
infeed->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
size += GetShapeSize(sub_shape);
current_properties_.set_output_bytes_accessed(index,
GetShapeSize(sub_shape));
});
current_properties_.set_output_bytes_accessed(size);
current_properties_[kBytesAccessedKey] = size;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleOutfeed(const HloInstruction* outfeed) {
current_properties_[kBytesAccessedKey] = 0;
for (int64_t i = 0; i < outfeed->operand_count(); ++i) {
const HloInstruction* operand = outfeed->operand(i);
int64_t size = 0;
ShapeUtil::ForEachLeafShape(
operand->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
size += GetShapeSize(sub_shape);
current_properties_.set_operand_bytes_accessed(
i, index, GetShapeSize(sub_shape));
});
current_properties_.set_operand_bytes_accessed(i, size);
current_properties_[kBytesAccessedKey] += size;
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleMap(const HloInstruction* map) {
TF_ASSIGN_OR_RETURN(const Properties sub_properties,
ProcessSubcomputation(map->to_apply()));
const int64_t element_count = ShapeUtil::ElementsIn(map->shape());
sub_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] = val * element_count;
}
});
return absl::OkStatus();
}
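// A reduce applies its combiner once per element folded away, i.e.
// (input elements - output elements) times; the subcomputation's per-call
// costs are scaled by that reduction count.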
absl::Status HloCostAnalysis::HandleReduce(const HloInstruction* reduce) {
HloComputation* function = reduce->to_apply();
TF_ASSIGN_OR_RETURN(const Properties sub_properties,
ProcessSubcomputation(function));
auto arg = reduce->operand(0);
auto output_shape = reduce->shape().IsArray()
? reduce->shape()
: reduce->shape().tuple_shapes(0);
int64_t reduction_count =
ShapeUtil::ElementsIn(arg->shape()) - ShapeUtil::ElementsIn(output_shape);
sub_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] = val * reduction_count;
}
});
return absl::OkStatus();
}
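// Reduce-window normally costs (window size - 1) combiner applications per
// output element. A special case recognizes a single reduced, symmetrically
// padded dimension whose window spans the output (a cumulative-reduction
// pattern) and models the cheaper optimized implementation instead.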
absl::Status HloCostAnalysis::HandleReduceWindow(
const HloInstruction* reduce_window) {
const Window& window = reduce_window->window();
auto function = reduce_window->to_apply();
TF_ASSIGN_OR_RETURN(Properties sub_properties,
ProcessSubcomputation(function));
int64_t window_element_count = 1;
for (const auto& dimension : window.dimensions()) {
window_element_count *= dimension.size();
}
const int64_t input_element_count =
ShapeUtil::ElementsIn(reduce_window->operand(0)->shape());
const int64_t output_element_count =
ShapeUtil::ElementsIn(reduce_window->shape().IsArray()
? reduce_window->shape()
: reduce_window->shape().tuple_shapes(0));
int64_t reduction_count = (window_element_count - 1) * output_element_count;
bool optimized_rw = false;
int64_t logical_reduction_dim = -1;
int64_t num_reduction_dimensions = absl::c_count_if(
window.dimensions(),
[](const WindowDimension& dim) { return (dim.size() != 1); });
int64_t num_padded_dimensions =
absl::c_count_if(window.dimensions(), [](const WindowDimension& dim) {
return (dim.padding_low() != 0 || dim.padding_high() != 0);
});
if (num_reduction_dimensions == 1 && num_padded_dimensions == 1 &&
reduce_window->shape().IsArray()) {
auto reduction_dim =
absl::c_find_if(window.dimensions(), [](const WindowDimension& dim) {
return (dim.size() != 1 && dim.padding_low() != 0 &&
dim.padding_high() != 0 &&
dim.padding_low() == dim.padding_high() &&
dim.size() == 2 * dim.padding_low() + 1);
});
if (reduction_dim != window.dimensions().end()) {
logical_reduction_dim = reduction_dim - window.dimensions().begin();
optimized_rw =
reduction_dim->padding_low() ==
reduce_window->shape().dimensions(logical_reduction_dim) - 1;
}
}
if (optimized_rw) {
window_element_count =
reduce_window->shape().dimensions(logical_reduction_dim);
reduction_count = (output_element_count / window_element_count) +
(window_element_count - 1);
VLOG(3) << "Reduction count: " << reduction_count
<< " reported for reduce-window:\n"
<< reduce_window->ToString();
}
if (options_.count_multiple_input_accesses) {
current_properties_.set_operand_utilization(0, 1.0 * output_element_count *
window_element_count /
input_element_count);
current_properties_.set_operand_bytes_accessed(
0, output_element_count * window_element_count *
ShapeUtil::ByteSizeOfPrimitiveType(
reduce_window->operand(0)->shape().element_type()));
}
sub_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] = val * reduction_count;
}
});
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleSelectAndScatter(
const HloInstruction* instruction) {
TF_ASSIGN_OR_RETURN(Properties select_properties,
ProcessSubcomputation(instruction->select()));
TF_ASSIGN_OR_RETURN(Properties scatter_properties,
ProcessSubcomputation(instruction->scatter()));
const auto source = instruction->operand(1);
const auto source_element_count = ShapeUtil::ElementsIn(source->shape());
int64_t window_element_count = 1;
for (const auto& dimension : instruction->window().dimensions()) {
window_element_count *= dimension.size();
}
const int64_t select_count =
source_element_count * (window_element_count - 1);
select_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] += val * select_count;
}
});
scatter_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] += val * source_element_count;
}
});
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleBitcast(const HloInstruction*) {
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
current_properties_.set_operand_bytes_accessed(0, 0);
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleBroadcast(const HloInstruction* broadcast) {
if (options_.count_multiple_input_accesses) {
current_properties_.set_operand_bytes_accessed(
0, GetShapeSize(broadcast->shape()));
current_properties_.set_operand_utilization(
0, 1.0 * ShapeUtil::ElementsIn(broadcast->shape()) /
ShapeUtil::ElementsIn(broadcast->operand(0)->shape()));
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandlePad(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAsyncStart(
const HloInstruction* async_start) {
TF_ASSIGN_OR_RETURN(
current_properties_,
ProcessSubcomputation(async_start->called_computations()[0]));
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAsyncUpdate(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAsyncDone(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCopyStart(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCopyDone(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleSend(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleSendDone(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleRecv(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleRecvDone(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleReshape(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleDynamicReshape(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleBatchNormTraining(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleBatchNormInference(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleBatchNormGrad(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleTranspose(const HloInstruction* transpose) {
if (transpose->IsEffectiveBitcast()) {
return HandleBitcast(transpose);
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAfterAll(const HloInstruction* token) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
for (int i = 0; i < token->operand_count(); ++i) {
current_properties_.set_operand_bytes_accessed(i, 0);
}
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAddDependency(
const HloInstruction* add_dependency) {
current_should_compute_bottleneck_time_ = false;
current_properties_[kBytesAccessedKey] = 0;
current_properties_.set_output_bytes_accessed(0);
for (int i = 0; i < add_dependency->operand_count(); ++i) {
current_properties_.set_operand_bytes_accessed(i, 0);
}
current_properties_[kOptimalSecondsKey] = 0;
return absl::OkStatus();
}
int64_t HloCostAnalysis::GetConvolutionFlops(
const HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& result_shape = convolution->shape();
return GetConvolutionFlops(convolution, lhs_shape, rhs_shape, result_shape);
}
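// Counts, per spatial dimension, how many (kernel, output) position pairs
// land on valid input elements given stride, dilation, and padding. The FMA
// count is then (input_features / feature_group_count) * output_features *
// (batch / batch_group_count) * product(valid positions), at kFmaFlops flops
// per FMA.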
int64_t HloCostAnalysis::GetConvolutionFlops(const HloInstruction* convolution,
const Shape& lhs_shape,
const Shape& rhs_shape,
const Shape& result_shape) {
Window window = convolution->window();
const auto& dnums = convolution->convolution_dimension_numbers();
const int64_t input_batch_dim = dnums.input_batch_dimension();
const int64_t input_feature_dim = dnums.input_feature_dimension();
const int64_t output_feature_dim = dnums.output_feature_dimension();
const int64_t input_feature =
ShapeUtil::GetDimension(lhs_shape, input_feature_dim);
const int64_t output_feature =
ShapeUtil::GetDimension(result_shape, output_feature_dim);
const int64_t batch = ShapeUtil::GetDimension(lhs_shape, input_batch_dim);
DimensionVector kernel_limits;
DimensionVector output_limits;
DimensionVector input_limits;
if (window.dimensions().empty()) {
window = window_util::MakeWindow({1});
kernel_limits.push_back(1);
output_limits.push_back(1);
input_limits.push_back(1);
} else {
for (int64_t spatial_dimension = 0;
spatial_dimension < window.dimensions_size(); ++spatial_dimension) {
const int64_t kernel_spatial_dim =
dnums.kernel_spatial_dimensions(spatial_dimension);
const int64_t kernel_limit = rhs_shape.dimensions(kernel_spatial_dim);
kernel_limits.push_back(kernel_limit);
const int64_t output_spatial_dim =
dnums.output_spatial_dimensions(spatial_dimension);
const int64_t output_limit = result_shape.dimensions(output_spatial_dim);
output_limits.push_back(output_limit);
const int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dimension);
const int64_t input_limit = lhs_shape.dimensions(input_spatial_dim);
input_limits.push_back(input_limit);
}
}
DimensionVector valid_position_counts;
for (int64_t spatial_dimension = 0;
spatial_dimension < window.dimensions_size(); ++spatial_dimension) {
const auto& window_dim = window.dimensions(spatial_dimension);
if (input_limits[spatial_dimension] == output_limits[spatial_dimension] &&
kernel_limits[spatial_dimension] == output_limits[spatial_dimension] &&
input_limits[spatial_dimension] == window_dim.base_dilation() &&
window_dim.window_dilation() == 1 &&
std::max<int64_t>(1, input_limits[spatial_dimension] - 1) ==
window_dim.stride() &&
window_dim.padding_low() == 0 && window_dim.padding_high() == 0) {
valid_position_counts.push_back(input_limits[spatial_dimension]);
continue;
}
if (input_limits[spatial_dimension] == 1 &&
kernel_limits[spatial_dimension] == output_limits[spatial_dimension] &&
window_dim.window_dilation() == 1 && window_dim.base_dilation() == 1 &&
window_dim.stride() == 1 &&
window_dim.padding_high() == output_limits[spatial_dimension] - 1 &&
window_dim.padding_low() == output_limits[spatial_dimension] - 1) {
valid_position_counts.push_back(output_limits[spatial_dimension]);
continue;
}
int64_t valid_position_count = 0;
for (int64_t kernel_idx = 0; kernel_idx < kernel_limits[spatial_dimension];
++kernel_idx) {
if (window_dim.stride() == 1 && window_dim.base_dilation() == 1) {
const int64_t undilated_index_base =
window_dim.padding_low() -
kernel_idx * window_dim.window_dilation();
valid_position_count += std::max<int64_t>(
std::min<int64_t>(
input_limits[spatial_dimension] + undilated_index_base,
output_limits[spatial_dimension]) -
std::max<int64_t>(undilated_index_base, int64_t{0}),
int64_t{0});
continue;
}
for (int64_t output_idx = 0;
output_idx < output_limits[spatial_dimension]; ++output_idx) {
const int64_t undilated_index =
output_idx * window_dim.stride() - window_dim.padding_low() +
kernel_idx * window_dim.window_dilation();
const int64_t lhs_spatial_index =
window_dim.base_dilation() > 1
? undilated_index / window_dim.base_dilation()
: undilated_index;
if (undilated_index != lhs_spatial_index * window_dim.base_dilation()) {
continue;
}
if (lhs_spatial_index < 0 ||
lhs_spatial_index >= input_limits[spatial_dimension]) {
continue;
}
valid_position_count += 1;
}
}
valid_position_counts.push_back(valid_position_count);
}
const int64_t fma_count =
(input_feature / convolution->feature_group_count()) * output_feature *
(batch / convolution->batch_group_count()) *
Product(valid_position_counts);
return fma_count * kFmaFlops;
}
absl::Status HloCostAnalysis::HandleConvolution(
const HloInstruction* convolution) {
current_properties_[kFlopsKey] = GetConvolutionFlops(convolution);
return absl::OkStatus();
}
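// Models the FFT as a Cooley-Tukey-style algorithm: one complex multiply
// (4 FMAs) per element per log2(length) stage, multiplied across the
// transformed dimensions.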
absl::Status HloCostAnalysis::HandleFft(const HloInstruction* fft) {
auto real_shape =
fft->operand(0)->shape().IsTuple()
? ShapeUtil::GetTupleElementShape(fft->operand(0)->shape(), 0)
: fft->operand(0)->shape();
constexpr int kFmaPerComplexMul = 4;
int64_t log_factors = 1;
for (int64_t dim : fft->fft_length()) {
log_factors *= Log2Floor<uint64_t>(dim);
}
current_properties_[kFlopsKey] = kFmaFlops * kFmaPerComplexMul * log_factors *
ShapeUtil::ElementsIn(real_shape);
return absl::OkStatus();
}
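// Triangular solve reads half of the triangular matrix (operand 0) and all of
// the right-hand side `b` (operand 1), performing roughly one FMA per element
// of `b` per column of the triangular matrix.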
absl::Status HloCostAnalysis::HandleTriangularSolve(const HloInstruction* hlo) {
float bytes_accessed = GetShapeSize(hlo->shape());
current_properties_.set_output_bytes_accessed(GetShapeSize(hlo->shape()));
bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f;
current_properties_.set_operand_bytes_accessed(
0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f);
bytes_accessed += GetShapeSize(hlo->operand(1)->shape());
  current_properties_.set_operand_bytes_accessed(
      1, GetShapeSize(hlo->operand(1)->shape()));
current_properties_[kBytesAccessedKey] = bytes_accessed;
const Shape& a_shape = hlo->operand(0)->shape();
const Shape& b_shape = hlo->operand(1)->shape();
int64_t elems = a_shape.dimensions(a_shape.dimensions_size() - 1);
elems *= ShapeUtil::ElementsIn(b_shape);
current_properties_[kFlopsKey] = kFmaFlops * elems;
return absl::OkStatus();
}
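// Cholesky of an n x n matrix costs roughly n^3 / 3 flops (batched over the
// leading dimensions); only the triangular half of the operand is counted as
// read and written.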
absl::Status HloCostAnalysis::HandleCholesky(const HloInstruction* hlo) {
float bytes_accessed = GetShapeSize(hlo->operand(0)->shape()) / 2.0f;
current_properties_.set_output_bytes_accessed(
GetShapeSize(hlo->operand(0)->shape()) / 2.0f);
bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f;
current_properties_.set_operand_bytes_accessed(
0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f);
current_properties_[kBytesAccessedKey] = bytes_accessed;
const Shape& a_shape = hlo->operand(0)->shape();
int64_t elems = a_shape.dimensions(a_shape.dimensions_size() - 1);
elems *= ShapeUtil::ElementsIn(a_shape);
current_properties_[kFlopsKey] = elems / 3;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleOptimizationBarrier(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAllGather(const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAllGatherStart(const HloInstruction* hlo) {
return HandleAllGather(hlo);
}
absl::Status HloCostAnalysis::HandleAllGatherDone(
const HloInstruction* ) {
return absl::OkStatus();
}
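// All-reduce is modeled as one flop per output array element (the combiner
// application) plus reading every operand buffer and writing every output
// buffer.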
absl::Status HloCostAnalysis::HandleAllReduce(const HloInstruction* crs) {
double flops = 0.0;
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachSubshape(
crs->shape(), [&](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsArray()) {
flops += ShapeUtil::ElementsIn(subshape);
output_bytes_accessed += GetShapeSize(subshape);
}
});
int64_t bytes_accessed = output_bytes_accessed;
for (const HloInstruction* operand : crs->operands()) {
bytes_accessed += GetShapeSize(operand->shape());
}
current_properties_[kFlopsKey] = flops;
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
current_properties_[kBytesAccessedKey] = bytes_accessed;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleReduceScatter(const HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAllReduceStart(const HloInstruction* hlo) {
return HandleAllReduce(hlo);
}
absl::Status HloCostAnalysis::HandleAllReduceDone(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleAllToAll(const HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCollectiveBroadcast(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCollectivePermute(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCollectivePermuteStart(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCollectivePermuteDone(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandlePartitionId(const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleReplicaId(const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleRng(const HloInstruction* random) {
current_properties_[kTranscendentalsKey] =
ShapeUtil::ElementsIn(random->shape());
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleRngBitGenerator(
const HloInstruction* random) {
current_properties_[kTranscendentalsKey] =
ShapeUtil::ElementsInRecursive(random->shape());
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleRngGetAndUpdateState(
const HloInstruction* random) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::FusionProcessOutputBytesAccessed(
const HloInstruction* fusion) {
ShapeUtil::ForEachSubshape(
fusion->shape(),
[this, fusion](const Shape& subshape, const ShapeIndex& shape_index) {
if (!subshape.IsArray()) {
return;
}
const HloInstruction* root = fusion->fused_expression_root();
auto further_examine_index =
shape_index.size() == 1 && root->opcode() == HloOpcode::kTuple;
if (further_examine_index &&
ShouldFilterFusionOutputIndex(fusion, shape_index)) {
current_properties_.set_output_bytes_accessed(shape_index, 0);
hlo_properties_[root->operand(shape_index[0])]
[GetOperandUtilizationKey(0)] = 0;
return;
}
if (further_examine_index) {
root = root->operand(shape_index[0]);
}
if (root->opcode() == HloOpcode::kDynamicUpdateSlice) {
int64_t size = GetShapeSize(root->operand(1)->shape());
current_properties_[kBytesAccessedKey] += size;
current_properties_.set_output_bytes_accessed(shape_index, size);
hlo_properties_[root][GetOperandUtilizationKey(0)] = 0;
return;
}
current_properties_[kBytesAccessedKey] += GetShapeSize(subshape);
current_properties_.set_output_bytes_accessed(shape_index,
GetShapeSize(subshape));
});
if (fusion->shape().IsTuple()) {
std::function<float(const Shape&, const ShapeIndex&)>
propagate_output_size_to_parent;
propagate_output_size_to_parent = [&](const Shape& shape,
const ShapeIndex& shape_index) {
float& bytes_accessed =
current_properties_[GetOutputBytesAccessedKey(shape_index)];
if (bytes_accessed != 0) {
return bytes_accessed;
}
for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
if (!subshape.IsTuple() && ShouldFilterFusionOutputIndex(fusion, {i})) {
continue;
}
ShapeIndex subshape_index(shape_index);
subshape_index.push_back(i);
bytes_accessed +=
propagate_output_size_to_parent(subshape, subshape_index);
}
return bytes_accessed;
};
current_properties_[GetOutputBytesAccessedKey()] = 0;
propagate_output_size_to_parent(fusion->shape(), {});
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::FusionProcessOperandBytesRead(
const HloInstruction* fusion) {
for (int64_t i = 0; i < fusion->fused_parameters().size(); ++i) {
const HloInstruction* operand = fusion->fused_parameter(i);
int64_t operand_size = 0;
if (ShouldFilterFusionInput(fusion, i)) {
current_properties_.set_operand_bytes_accessed(i, operand_size);
current_properties_.set_operand_utilization(
i, hlo_properties_[operand][kUtilizationKey]);
continue;
}
if (!operand->shape().IsTuple()) {
operand_size = FusionParameterReadBytes(operand);
} else {
ShapeUtil::ForEachLeafShape(
operand->shape(),
[&](const Shape& , const ShapeIndex& index) {
const HloInstruction* gte = operand;
for (int64_t sub_index : index) {
for (const HloInstruction* user : gte->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() == sub_index) {
gte = user;
break;
}
}
}
int64_t size = FusionParameterReadBytes(gte);
operand_size += size;
current_properties_.set_operand_bytes_accessed(i, index, size);
});
}
current_properties_[kBytesAccessedKey] += operand_size;
current_properties_.set_operand_bytes_accessed(i, operand_size);
current_properties_.set_operand_utilization(
i, hlo_properties_[operand][kUtilizationKey]);
}
return absl::OkStatus();
}
absl::Status HloCostAnalysis::FusionCountConstantsMemoryAccess(
const HloInstruction* fusion) {
for (const HloInstruction* instr :
fusion->fused_instructions_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kConstant &&
ShapeUtil::ElementsIn(instr->shape()) >
immediate_constant_max_elements()) {
float utilization = hlo_properties_[instr][kUtilizationKey];
if (!options_.count_multiple_input_accesses) {
utilization = fmin(utilization, 1.0);
}
current_properties_[kBytesAccessedKey] +=
GetShapeSize(instr->shape()) * utilization;
}
}
return absl::OkStatus();
}
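// The cost of a fusion is the cost of its fused computation, except that
// bytes accessed are recomputed at the fusion boundary: output bytes from the
// fused root (dynamic-update-slice roots are charged only their update size),
// large fused constants, and parameter bytes as actually read by the fused
// users.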
absl::Status HloCostAnalysis::HandleFusion(const HloInstruction* fusion) {
VLOG(8) << "Processing fusion " << fusion->ToString();
if (fusion->IsCustomFusion()) {
for (const HloInstruction* hlo :
fusion->fused_instructions_computation()->instructions()) {
if (hlo->opcode() == HloOpcode::kGather) {
return HandleGather(hlo);
}
if (hlo->opcode() == HloOpcode::kScatter) {
return HandleScatter(hlo);
}
}
}
TF_ASSIGN_OR_RETURN(
current_properties_,
ProcessSubcomputation(fusion->fused_instructions_computation()));
current_properties_[kBytesAccessedKey] = 0;
TF_RETURN_IF_ERROR(FusionProcessOutputBytesAccessed(fusion));
TF_RETURN_IF_ERROR(FusionCalculateUtilizations(fusion));
TF_RETURN_IF_ERROR(FusionCountConstantsMemoryAccess(fusion));
TF_RETURN_IF_ERROR(FusionProcessOperandBytesRead(fusion));
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleCall(const HloInstruction* call) {
TF_ASSIGN_OR_RETURN(current_properties_,
ProcessSubcomputation(call->to_apply()));
current_should_compute_bottleneck_time_ = false;
return absl::OkStatus();
}
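// Custom calls are opaque to the cost model, so every property is marked -1
// ("unknown") rather than guessed.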
absl::Status HloCostAnalysis::HandleCustomCall(
const HloInstruction* custom_call) {
current_properties_[kOptimalSecondsKey] = -1;
current_properties_[kBytesAccessedKey] = -1;
current_properties_.set_output_bytes_accessed(-1);
for (int i = 0; i < custom_call->operand_count(); ++i) {
current_properties_.set_operand_bytes_accessed(i, -1);
}
current_properties_[kFlopsKey] = -1;
current_should_compute_bottleneck_time_ = false;
return absl::OkStatus();
}
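// Sort cost is approximated as a comparison sort: n * ceil(log2(n))
// comparisons, counted as flops.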
absl::Status HloCostAnalysis::HandleSort(const HloInstruction* sort) {
int64_t elements = ShapeUtil::ElementsIn(sort->operand(0)->shape());
current_properties_[kFlopsKey] = elements * Log2Ceiling<uint64_t>(elements);
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleTopK(const HloInstruction* topk) {
return absl::OkStatus();
}
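// The trip count of a while loop is unknown, so its cost is estimated as a
// single execution of the body plus the condition.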
absl::Status HloCostAnalysis::HandleWhile(const HloInstruction* xla_while) {
TF_ASSIGN_OR_RETURN(const Properties body_properties,
ProcessSubcomputation(xla_while->while_body()));
TF_ASSIGN_OR_RETURN(const Properties condition_properties,
ProcessSubcomputation(xla_while->while_condition()));
current_properties_ = Properties();
body_properties.ForEach([&](absl::string_view key, float val) {
current_properties_[key] += val;
});
condition_properties.ForEach([&](absl::string_view key, float val) {
current_properties_[key] += val;
});
current_should_compute_bottleneck_time_ = false;
return absl::OkStatus();
}
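// Only one branch of a conditional executes, so take the elementwise maximum
// of the branch computations' properties as a worst-case estimate.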
absl::Status HloCostAnalysis::HandleConditional(
const HloInstruction* conditional) {
TF_ASSIGN_OR_RETURN(
const Properties branch0_computation_properties,
ProcessSubcomputation(conditional->branch_computation(0)));
current_properties_ = branch0_computation_properties;
for (int j = 1; j < conditional->branch_count(); ++j) {
TF_ASSIGN_OR_RETURN(
const Properties branch_computation_properties,
ProcessSubcomputation(conditional->branch_computation(j)));
branch_computation_properties.ForEach(
[&](absl::string_view key, float val) {
auto& current_property = current_properties_[key];
current_property = std::max(current_property, val);
});
}
current_should_compute_bottleneck_time_ = false;
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleGather(const HloInstruction* gather) {
int64_t output_size = GetShapeSize(gather->shape());
current_properties_[kBytesAccessedKey] =
output_size * 2 + GetShapeSize(gather->operand(1)->shape());
current_properties_.set_operand_bytes_accessed(0, output_size);
current_properties_.set_operand_bytes_accessed(
1, GetShapeSize(gather->operand(1)->shape()));
current_properties_.set_operand_utilization(
0, 1.0 * ShapeUtil::ElementsIn(gather->shape()) /
ShapeUtil::ElementsIn(gather->operand(0)->shape()));
current_properties_.set_output_bytes_accessed(output_size);
return absl::OkStatus();
}
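// Scatter touches the equivalent of three copies of the update shapes (the
// inputs, the outputs, and the updates themselves) plus the scatter indices,
// and applies its combiner once per update element.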
absl::Status HloCostAnalysis::HandleScatter(const HloInstruction* hlo) {
auto* scatter = Cast<HloScatterInstruction>(hlo);
int64_t total_update_size = 0;
for (int i = 0, n = scatter->scatter_operand_count(); i < n; ++i) {
int64_t update_size = GetShapeSize(scatter->scatter_updates()[i]->shape());
current_properties_.set_operand_bytes_accessed(i, update_size);
current_properties_.set_operand_bytes_accessed(n + 1 + i, update_size);
total_update_size += update_size;
}
int64_t scatter_indices_size =
GetShapeSize(scatter->scatter_indices()->shape());
current_properties_.set_operand_bytes_accessed(
scatter->scatter_operand_count(), scatter_indices_size);
current_properties_[kBytesAccessedKey] =
total_update_size * 3 + scatter_indices_size;
current_properties_.set_output_bytes_accessed(total_update_size);
const int64_t element_count =
ShapeUtil::ElementsIn(scatter->scatter_updates()[0]->shape());
TF_ASSIGN_OR_RETURN(const Properties sub_properties,
ProcessSubcomputation(scatter->to_apply()));
sub_properties.ForEach([&](absl::string_view key, float val) {
if (KeyToCopyFromSubcomputation(key)) {
current_properties_[key] = val * element_count;
}
});
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleGetDimensionSize(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::HandleSetDimensionSize(
const HloInstruction* ) {
return absl::OkStatus();
}
absl::Status HloCostAnalysis::FinishVisit(const HloInstruction*) {
return absl::OkStatus();
}
float HloCostAnalysis::flop_count() const { return properties_sum_[kFlopsKey]; }
float HloCostAnalysis::transcendental_count() const {
return properties_sum_[kTranscendentalsKey];
}
float HloCostAnalysis::bytes_accessed() const {
return properties_sum_[kBytesAccessedKey];
}
float HloCostAnalysis::optimal_seconds() const {
return properties_sum_[kOptimalSecondsKey];
}
HloCostAnalysis::Properties HloCostAnalysis::properties(
const HloInstruction& hlo) const {
auto it = hlo_properties_.find(&hlo);
if (it == hlo_properties_.end()) {
return Properties();
}
return it->second;
}
int64_t HloCostAnalysis::flop_count(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kFlopsKey, hlo_properties_);
}
int64_t HloCostAnalysis::transcendental_count(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kTranscendentalsKey, hlo_properties_);
}
int64_t HloCostAnalysis::bytes_accessed(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kBytesAccessedKey, hlo_properties_);
}
int64_t HloCostAnalysis::operand_bytes_accessed(const HloInstruction& hlo,
int64_t operand_num,
ShapeIndex index) const {
return GetPropertyForHlo(hlo, GetOperandBytesAccessedKey(operand_num, index),
hlo_properties_);
}
float HloCostAnalysis::operand_utilization(const HloInstruction& hlo,
int64_t operand_num,
ShapeIndex index) const {
return GetPropertyForHlo(hlo, GetOperandUtilizationKey(operand_num, index),
hlo_properties_);
}
int64_t HloCostAnalysis::output_bytes_accessed(const HloInstruction& hlo,
ShapeIndex index) const {
return GetPropertyForHlo(hlo, GetOutputBytesAccessedKey(index),
hlo_properties_);
}
float HloCostAnalysis::optimal_seconds(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kOptimalSecondsKey, hlo_properties_);
}
int64_t HloCostAnalysis::GetBytesRead(
const HloInstruction& hlo, std::optional<int64_t> memory_space) const {
int64_t bytes_read = 0;
for (int operand_number = 0; operand_number < hlo.operand_count();
++operand_number) {
const Shape& shape = hlo.operand(operand_number)->shape();
ShapeUtil::ForEachSubshape(
shape, [&](const Shape& sub_shape, const ShapeIndex& index) {
if (ShapeUtil::IsLeafIndex(shape, index)) {
std::optional<int64_t> index_memory_space;
if (sub_shape.has_layout()) {
index_memory_space = sub_shape.layout().memory_space();
}
if (!memory_space || memory_space == index_memory_space) {
bytes_read += operand_bytes_accessed(hlo, operand_number, index);
}
}
});
}
return bytes_read;
}
int64_t HloCostAnalysis::GetBytesWritten(
const HloInstruction& hlo, std::optional<int64_t> memory_space) const {
int64_t bytes_written = 0;
ShapeUtil::ForEachLeafShape(
hlo.shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
std::optional<int64_t> index_memory_space;
if (sub_shape.has_layout()) {
index_memory_space = sub_shape.layout().memory_space();
}
if (!memory_space || memory_space == index_memory_space) {
bytes_written += output_bytes_accessed(hlo, index);
}
});
return bytes_written;
}
absl::StatusOr<HloCostAnalysis::Properties>
HloCostAnalysis::ProcessSubcomputation(HloComputation* computation) {
auto visitor = CreateNestedCostAnalysis();
visitor->ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->Accept(visitor.get()));
for (auto& entry : visitor->hlo_properties_) {
hlo_properties_[entry.first] = std::move(entry.second);
}
return visitor->properties();
}
std::unique_ptr<HloCostAnalysis> HloCostAnalysis::CreateNestedCostAnalysis() {
return std::make_unique<HloCostAnalysis>(options_);
}
std::string HloCostAnalysis::GetOperandBytesAccessedKey(
int64_t operand_num, const ShapeIndex& index) {
return absl::StrCat(kBytesAccessedKey, operand_num, index.ToString());
}
std::string HloCostAnalysis::GetOperandUtilizationKey(
int64_t operand_num, const ShapeIndex& index) {
return absl::StrCat(kUtilizationKey, operand_num, index.ToString());
}
std::string HloCostAnalysis::GetOutputBytesAccessedKey(
const ShapeIndex& index) {
return absl::StrCat(kBytesAccessedKey, "out", index.ToString());
}
bool HloCostAnalysis::KeyToCopyFromSubcomputation(absl::string_view key) const {
return !absl::StartsWith(key, kBytesAccessedKey) &&
!absl::StartsWith(key, kUtilizationKey);
}
} | #include "xla/service/hlo_cost_analysis.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/client.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/local_service.h"
#include "xla/service/service.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class HloCostAnalysisTest : public ::testing::Test {
protected:
HloCostAnalysisTest()
: client_(ClientLibrary::LocalClientOrDie()),
service_(static_cast<Service*>(ClientLibrary::GetXlaService(
static_cast<LocalClient*>(client_)->platform()))) {
{
XlaBuilder builder("add_and_exp");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto half = ConstantR0<float>(&builder, 0.5);
Exp(Add(x, half));
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
add_and_exp_ = std::move(computation_status).value();
}
{
XlaBuilder builder("add");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
Add(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
add_ = std::move(computation_status).value();
}
{
XlaBuilder builder("sigmoid");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto one = ConstantR0<float>(&builder, 1.0);
Div(one, Add(one, Exp(Neg(x))));
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
sigmoid_ = std::move(computation_status).value();
}
{
XlaBuilder builder("max");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
Max(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
max_ = std::move(computation_status).value();
}
{
XlaBuilder builder("gt");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
Gt(x, y);
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
gt_ = std::move(computation_status).value();
}
}
std::unique_ptr<HloModule> BuildHloGraph(XlaBuilder* builder) {
auto computation_status = builder->Build();
TF_CHECK_OK(computation_status.status());
auto computation = std::move(computation_status).value();
auto config = HloModule::CreateModuleConfigFromProto(computation.proto(),
DebugOptions())
.value();
return HloModule::CreateFromProto(computation.proto(), config).value();
}
Client* client_;
Service* service_;
XlaComputation add_;
XlaComputation add_and_exp_;
XlaComputation sigmoid_;
XlaComputation max_;
XlaComputation gt_;
};
TEST_F(HloCostAnalysisTest, MatrixMultiply) {
XlaBuilder builder("matrix_multiply");
auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
Dot(lhs, rhs);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 5 + 5 * 30 + 10 * 30));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 5);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}
TEST_F(HloCostAnalysisTest, DotGeneral) {
XlaBuilder builder("matrix_multiply");
auto lhs =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
auto rhs =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(0);
dnums.add_rhs_contracting_dimensions(1);
DotGeneral(lhs, rhs, dnums);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 10 * 30));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
sizeof(float) * 10 * 5 * 5);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
sizeof(float) * 5 * 5 * 30);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}
TEST_F(HloCostAnalysisTest, DotGeneral2) {
XlaBuilder builder("matrix_multiply");
auto lhs =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
auto rhs =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_lhs_batch_dimensions(2);
dnums.add_rhs_contracting_dimensions(0);
dnums.add_rhs_batch_dimensions(1);
DotGeneral(lhs, rhs, dnums);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 5 * 10 * 30));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
sizeof(float) * 10 * 5 * 5);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
sizeof(float) * 5 * 5 * 30);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 5 * 10 * 30);
}
TEST_F(HloCostAnalysisTest, DotGeneral3) {
XlaBuilder builder("matrix_multiply");
auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
DotDimensionNumbers dnums;
DotGeneral(lhs, rhs, dnums);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 5 + 5 * 30 + 5 * 5 * 10 * 30));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 5);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
EXPECT_EQ(analysis.output_bytes_accessed(*root),
sizeof(float) * 5 * 5 * 10 * 30);
}
TEST_F(HloCostAnalysisTest, Map) {
XlaBuilder builder("map");
auto input = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10}), "in");
Map(&builder, {input}, add_and_exp_, {0});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 10);
EXPECT_EQ(analysis.transcendental_count(), 10);
EXPECT_EQ(analysis.bytes_accessed(), 80);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}
TEST_F(HloCostAnalysisTest, Convolution) {
XlaBuilder builder("convolution");
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/1, /*y=*/10, /*x=*/20}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/1, /*input_feature=*/1, /*y=*/3, /*x=*/3}),
"kernel");
Conv(input, kernel, {1, 1}, Padding::kValid);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
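// Valid convolution of a 10x20 image with a 3x3 kernel yields an 8x18
// output; each output element costs 3*3 multiply-adds at 2 flops apiece.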
EXPECT_EQ(analysis.flop_count(), 8 * 18 * 2 * 3 * 3);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 20 + 3 * 3 + 8 * 18));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 3 * 3);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 8 * 18);
}
TEST_F(HloCostAnalysisTest, ConvolutionSame) {
XlaBuilder builder("convolution_same");
const int iw = 3;
const int ih = 3;
const int kw = 3;
const int kh = 3;
const int ow = iw;
const int oh = ih;
const int sx = 1;
const int sy = 1;
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/1, /*y=*/ih, /*x=*/iw}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/1, /*input_feature=*/1, /*y=*/kh, /*x=*/kw}),
"kernel");
Conv(input, kernel, {sx, sy}, Padding::kSame);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
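// 'Same' padding clips the 3x3 window at the borders of the 3x3 input: the
// four corners see 4 taps, the four edges 6, and the center 9; each tap is a
// 2-flop multiply-add.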
EXPECT_EQ(analysis.flop_count(), 2 * (4 + 6 + 4 + 6 + 9 + 6 + 4 + 6 + 4));
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (iw * ih + kw * kh + ow * oh));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * iw * ih);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * kw * kh);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * ow * oh);
}
TEST_F(HloCostAnalysisTest, ConvolutionExtreme) {
XlaBuilder builder("convolution");
constexpr int64_t kLarge = 512 * 1024;
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/1, /*x=*/kLarge}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/1, /*input_feature=*/1, /*x=*/kLarge}),
"kernel");
ConvGeneralDilated(input, kernel, /*window_strides=*/{kLarge - 1},
/*padding=*/{{0, 0}}, /*lhs_dilation=*/{kLarge}, /*rhs_dilation=*/{1},
XlaBuilder::CreateDefaultConvDimensionNumbers(1));
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
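// Even with the huge stride and base dilation, only kLarge multiply-adds
// (2 flops each) are actually performed, so the count stays linear.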
EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}
TEST_F(HloCostAnalysisTest, ConvolutionExtreme2) {
XlaBuilder builder("convolution");
constexpr int64_t kLarge = 512 * 1024;
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/1, /*x=*/1}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/1, /*input_feature=*/1, /*x=*/kLarge}),
"kernel");
ConvGeneralDilated(input, kernel, /*window_strides=*/{1},
/*padding=*/{{kLarge - 1, kLarge - 1}}, /*lhs_dilation=*/{1},
/*rhs_dilation=*/{1}, XlaBuilder::CreateDefaultConvDimensionNumbers(1));
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}
TEST_F(HloCostAnalysisTest, ConvolutionWithFeatureGroup) {
XlaBuilder builder("convolution");
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/120, /*y=*/10, /*x=*/20}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/120, /*input_feature=*/1, /*y=*/3, /*x=*/3}),
"kernel");
Conv(input, kernel, {1, 1}, Padding::kValid, 120);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
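// With 120 feature groups this is a depthwise convolution: 120 independent
// single-channel 8x18 outputs, each element costing 2*3*3 flops.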
EXPECT_EQ(analysis.flop_count(), 120 * 8 * 18 * 2 * 3 * 3);
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (120 * 10 * 20 + 120 * 3 * 3 + 120 * 8 * 18));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
sizeof(float) * 120 * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
sizeof(float) * 120 * 3 * 3);
EXPECT_EQ(analysis.output_bytes_accessed(*root),
sizeof(float) * 120 * 8 * 18);
}
TEST_F(HloCostAnalysisTest, Reduce) {
XlaBuilder builder("reduce");
auto input =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
Reduce(input, ConstantR0<float>(&builder, 0.0f), add_, {1});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
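// Reducing the size-20 dimension leaves 10 outputs, each accumulated with
// 20 - 1 adds: 10 * 20 - 10 flops in total.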
EXPECT_EQ(analysis.flop_count(), 10 * 20 - 10);
EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 10));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}
TEST_F(HloCostAnalysisTest, ReduceWindow) {
XlaBuilder builder("reduce_window");
auto input =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {4, 5},
Padding::kValid);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
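// Valid 4x5 windows with strides {4, 5} tile the 10x20 input into 2*4
// outputs, each reduced with 4*5 - 1 adds.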
EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1));
EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 2 * 4));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 4);
}
TEST_F(HloCostAnalysisTest, ReduceWindowWithOverlaps) {
XlaBuilder builder("reduce_window");
auto input =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {8, 8}), "input");
ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {2, 1},
Padding::kValid);
auto hlo_module = BuildHloGraph(&builder);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
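// A 4x5 window over an 8x8 input with strides {2, 1} has 3 vertical and 4
// horizontal placements.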
int n_output_elements = 3 * 4;
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(root->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), n_output_elements * (4 * 5 - 1));
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (8 * 8 + 1 + n_output_elements));
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 8 * 8);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root),
sizeof(float) * n_output_elements);
}
TEST_F(HloCostAnalysisTest, ReduceWindowSingleDimReduceBroadcast) {
absl::string_view hlo_text = R"(
HloModule fusion.50
region_0.868 {
Arg_1.870 = f32[] parameter(1)
Arg_0.869 = f32[] parameter(0)
ROOT maximum.871 = f32[] maximum(Arg_0.869, Arg_1.870)
}
ENTRY fusion.50 {
constant.367 = f32[] constant(-inf)
param0 = f32[2,3,1024,1024]{2,3,1,0} parameter(0)
ROOT reduce-window.159 = f32[2,3,1024,1024]{2,3,1,0} reduce-window(param0, constant.367), window={size=1x1x1x2047 pad=0_0x0_0x0_0x1023_1023}, to_apply=region_0.868
}
)";
auto hlo_module = ParseAndReturnUnverifiedModule(hlo_text).value();
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
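// The window spans the entire padded dimension, so this reduce-window
// behaves like a single-dimension reduce followed by a broadcast; the
// expected flop count below is correspondingly far smaller than the naive
// 2047 taps per output element.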
EXPECT_EQ(analysis.flop_count(), (2 * 3 * 1024) + (1024 - 1));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
sizeof(float) * 2 * 3 * 1024 * 1024);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root),
sizeof(float) * 2 * 3 * 1024 * 1024);
}
TEST_F(HloCostAnalysisTest, ReduceWindowVariadic) {
XlaBuilder builder("reduce_window_variadic");
auto elem_shape = ShapeUtil::MakeShape(F32, {});
auto p2 = Parameter(&builder, 0, elem_shape, "x0");
auto p3 = Parameter(&builder, 1, elem_shape, "x1");
auto p4 = Parameter(&builder, 2, elem_shape, "y0");
auto p5 = Parameter(&builder, 3, elem_shape, "y1");
absl::InlinedVector<XlaOp, 2> compute_vec = {Min(p2, p4), Min(p3, p5)};
Tuple(&builder, compute_vec);
TF_ASSERT_OK_AND_ASSIGN(auto compute_tuple, builder.Build());
auto input1 =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input1");
auto input2 =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {10, 20}), "input2");
auto init = ConstantR0<float>(&builder, 0);
ReduceWindow({input1, input2}, {init, init}, compute_tuple, {4, 5}, {4, 5},
Padding::kValid);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 4 * 2 * (4 * 5 - 1));
EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 * 2 + 2 * 3));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 4);
}
TEST_F(HloCostAnalysisTest, SelectAndScatter) {
XlaBuilder builder("select_and_scatter");
auto operand =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
auto source =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 4}), "source");
SelectAndScatter(operand, gt_, {4, 5}, {4, 5}, Padding::kValid, source,
ConstantR0<float>(&builder, 0), add_);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
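// Each of the 2*4 source elements selects over a 4*5 window (4*5 - 1
// comparisons) and then scatters its value with one add.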
EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1 + 1));
EXPECT_EQ(analysis.bytes_accessed(),
sizeof(float) * (10 * 20 + 2 * 4 + 1 + 10 * 20));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 2 * 4);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 20);
}
TEST_F(HloCostAnalysisTest, Broadcast) {
XlaBuilder b("broadcast");
Broadcast(ConstantR0<float>(&b, 42), {10, 7});
auto hlo_module = BuildHloGraph(&b);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 1);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}
TEST_F(HloCostAnalysisTest, BroadcastCountMultipleInputAccesses) {
XlaBuilder b("broadcast");
Broadcast(ConstantR0<float>(&b, 42), {10, 7});
auto hlo_module = BuildHloGraph(&b);
HloCostAnalysis analysis(HloCostAnalysis::Options{
.shape_size = ShapeSize, .count_multiple_input_accesses = true});
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
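// With count_multiple_input_accesses=true, the broadcast charges its scalar
// operand once per output element rather than once overall.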
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 7);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}
TEST_F(HloCostAnalysisTest, FullyConnectedForward) {
XlaBuilder builder("fully_connected_forward");
auto input =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "input");
auto weight =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 20}), "weight");
auto bias = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {20}), "bias");
Map(&builder, {Add(Dot(input, weight), bias, {1})}, sigmoid_, {0, 1});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 2 * 1000 + 200 + 3 * 200);
EXPECT_EQ(analysis.transcendental_count(), 200);
}
TEST_F(HloCostAnalysisTest, MatmulAndConvolutionCanBeTheSameComputation) {
HloCostAnalysis conv_analysis(ShapeSize);
{
XlaBuilder builder("conv_looking_matmul");
auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
"input");
auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
"weights");
Conv(lhs, rhs, {1, 1}, Padding::kSame);
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
&conv_analysis));
}
HloCostAnalysis matmul_analysis(ShapeSize);
{
XlaBuilder builder("matmul");
auto lhs =
Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64}), "input");
auto rhs =
Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64}), "weights");
Dot(lhs, rhs);
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
&matmul_analysis));
}
EXPECT_EQ(conv_analysis.flop_count(), matmul_analysis.flop_count());
}
TEST_F(HloCostAnalysisTest, LatencyBoundedOptimalTime) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[1,1] parameter(0)
param1 = f32[1,1] parameter(1)
ROOT add = f32[1,1] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
const HloInstruction* add = module->entry_computation()->root_instruction();
HloCostAnalysis::Options options{ShapeSize};
const float clock_cycle_seconds = 10.0f;
options.set_flops_per_second(1024);
options.set_bytes_per_second(1024);
options.set_transcendentals_per_second(1024);
options.set_flops_min_latency_second(clock_cycle_seconds);
HloCostAnalysis cost_analysis(options);
ASSERT_IS_OK(add->Accept(&cost_analysis));
EXPECT_EQ(cost_analysis.optimal_seconds(), clock_cycle_seconds);
}
using FusionCostAnalysis = HloTestBase;
TEST_F(FusionCostAnalysis, LoopFusionDynUpdateSlice) {
const char* hlo_fusion_module_str = R"(
HloModule module
_.1 {
tmp_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
tmp_1 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
tmp_2 = s32[]{:T(128)} parameter(1)
tmp_3 = s32[]{:T(128)} constant(0)
tmp_4 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)S(3)} dynamic-slice(tmp_1, tmp_2, tmp_3, tmp_3, tmp_3), dynamic_slice_sizes={1,32,256,1152}
tmp_11 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(tmp_0, tmp_4, tmp_2, tmp_3, tmp_3, tmp_3)
ROOT tmp_20 = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) tuple(tmp_11)
}
ENTRY _ {
_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
_1 = s32[]{:T(128)} parameter(1)
_4 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
ROOT _ = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) fusion(_0, _1, _4), kind=kLoop, calls=_.1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
HloCostAnalysis fusion_analysis(ShapeSize);
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
const char* hlo_dus_module_str = R"(
HloModule module
ENTRY _ {
_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
_1 = s32[]{:T(128)} parameter(1)
_2 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
ROOT _ = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(_0, _2, _1, _1, _1, _1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto dus_module,
ParseAndReturnVerifiedModule(hlo_dus_module_str));
HloCostAnalysis dus_analysis(ShapeSize);
auto dus = dus_module->entry_computation()->root_instruction();
ASSERT_IS_OK(dus->Accept(&dus_analysis));
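// The fused dynamic-update-slice writes into operand 0 in place, so the test
// expects zero bytes to be charged for that operand.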
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), 0);
EXPECT_EQ(fusion_analysis.bytes_accessed(), dus_analysis.bytes_accessed());
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
dus_analysis.operand_bytes_accessed(*dus, 0));
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
dus_analysis.operand_bytes_accessed(*dus, 2));
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2),
dus_analysis.operand_bytes_accessed(*dus, 1));
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
dus_analysis.output_bytes_accessed(*dus));
}
TEST_F(FusionCostAnalysis, LoopFusion) {
for (int i = 0; i < 4; ++i) {
Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/0.0f, /*to=*/1.0f, /*rows=*/2, /*cols=*/2)));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/1.0f, /*to=*/2.0f, /*rows=*/2, /*cols=*/2)));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/2.0f, /*to=*/3.0f, /*rows=*/2, /*cols=*/2)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c2));
auto clamp = builder.AddInstruction(
HloInstruction::CreateTernary(r2f32, HloOpcode::kClamp, c2, add, add));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r2f32, HloOpcode::kExp, add));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, exp, c3));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kSubtract, mul, clamp));
auto tuple = HloInstruction::CreateTuple({sub, sub, mul, c1});
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop);
HloCostAnalysis::Options options{ShapeSize};
options.set_flops_per_second(16 * (i == 1 ? 1 / 2.0 : 1.0));
options.set_transcendentals_per_second(4 * (i == 2 ? 1 / 4.0 : 1.0));
options.set_bytes_per_second(64 * (i == 3 ? 1 / 8.0 : 1.0));
HloCostAnalysis fusion_analysis(options);
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
EXPECT_EQ(fusion_analysis.flop_count(), 16);
EXPECT_EQ(fusion_analysis.transcendental_count(), 4);
constexpr int64_t bytes_accessed = sizeof(float) * 4 * 2 * 2;
static_assert(bytes_accessed == 64, "");
EXPECT_EQ(fusion_analysis.bytes_accessed(), bytes_accessed);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
sizeof(float) * 2 * 2);
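// At the base rates each resource needs exactly 1 second; each iteration i
// throttles one resource (flops, transcendentals, then bytes) so that it
// becomes the bottleneck and the optimal time doubles to 1 << i seconds.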
EXPECT_EQ(fusion_analysis.optimal_seconds(), 1 << i);
}
}
TEST_F(FusionCostAnalysis, NestedCopyFusion) {
absl::string_view nested_fusion_text = R"(
HloModule temp, is_scheduled=true
copy_fusion.1291.clone {
input.1291 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)S(1)} parameter(0)
ROOT copy.74276 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} copy(input.1291)
}
fused_computation.4150.clone {
param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
fusion.103344 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_0.185389), kind=kLoop, calls=copy_fusion.1291.clone
constant.230138 = s32[]{:T(128)} constant(0)
param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
ROOT dynamic-slice.40526 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(fusion.103344, constant.230138, param_1.219146, constant.230138, constant.230138), dynamic_slice_sizes={2,384,2,256}
}
ENTRY temp {
param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
ROOT fusion.85943 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279), kind=kLoop, calls=fused_computation.4150.clone
}
)";
absl::string_view fusion_text = R"(
HloModule temp, is_scheduled=true
fused_computation.4150.clone {
param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
constant.230138 = s32[]{:T(128)} constant(0)
param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
ROOT dynamic-slice.40526 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(param_0.185389, constant.230138, param_1.219146, constant.230138, constant.230138), dynamic_slice_sizes={2,384,2,256}
}
ENTRY temp {
param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
ROOT fusion.85943 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279), kind=kLoop, calls=fused_computation.4150.clone
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto nested_fusion_module,
ParseAndReturnVerifiedModule(nested_fusion_text));
HloCostAnalysis nested_analysis(ShapeSize);
auto* nested_root =
nested_fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(nested_root->Accept(&nested_analysis));
TF_ASSERT_OK_AND_ASSIGN(auto fusion_module,
ParseAndReturnVerifiedModule(fusion_text));
HloCostAnalysis fusion_analysis(ShapeSize);
auto* fusion_root = fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis));
EXPECT_EQ(nested_analysis.bytes_accessed(*nested_root),
fusion_analysis.bytes_accessed(*fusion_root));
}
TEST_F(FusionCostAnalysis, NestedCopyFusionDUS) {
absl::string_view nested_fusion_text = R"(
HloModule temp, is_scheduled=true
copy_fusion.1291.clone {
input.1291 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
ROOT copy.74276 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} copy(input.1291)
}
fused_computation.4150.clone {
param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
fusion.103344 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_0.185389), kind=kLoop, calls=copy_fusion.1291.clone
param_1.185389 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
constant.230138 = s32[]{:T(128)} constant(0)
param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
param_3.229 = pred[]{:T(512)} constant(false)
broadcast.11499 = pred[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} broadcast(param_3.229), dimensions={}
dynamic-slice.11241 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(fusion.103344, constant.230138, constant.230138, param_1.219146, constant.230138), dynamic_slice_sizes={2,6144,1,256}
select.9063 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} select(broadcast.11499, param_1.185389, dynamic-slice.11241)
ROOT dynamic-update-slice.40526 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-update-slice(fusion.103344, select.9063, constant.230138, constant.230138, param_1.219146, constant.230138)
}
ENTRY temp {
param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
param_1.123719 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
ROOT fusion.85943 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279, param_1.123719), kind=kLoop, calls=fused_computation.4150.clone
}
)";
absl::string_view fusion_text = R"(
HloModule temp, is_scheduled=true
fused_computation.4150.clone {
param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
param_1.185389 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
constant.230138 = s32[]{:T(128)} constant(0)
param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
param_3.229 = pred[]{:T(512)} constant(false)
broadcast.11499 = pred[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} broadcast(param_3.229), dimensions={}
dynamic-slice.11241 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(param_0.185389, constant.230138, constant.230138, param_1.219146, constant.230138), dynamic_slice_sizes={2,6144,1,256}
select.9063 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} select(broadcast.11499, param_1.185389, dynamic-slice.11241)
ROOT dynamic-update-slice.40526 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-update-slice(param_0.185389, select.9063, constant.230138, constant.230138, param_1.219146, constant.230138)
}
ENTRY temp {
param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
param_1.123719 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
ROOT fusion.85943 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279, param_1.123719), kind=kLoop, calls=fused_computation.4150.clone
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto nested_fusion_module,
ParseAndReturnVerifiedModule(nested_fusion_text));
HloCostAnalysis nested_analysis(ShapeSize);
auto* nested_root =
nested_fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(nested_root->Accept(&nested_analysis));
TF_ASSERT_OK_AND_ASSIGN(auto fusion_module,
ParseAndReturnVerifiedModule(fusion_text));
HloCostAnalysis fusion_analysis(ShapeSize);
auto* fusion_root = fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis));
EXPECT_EQ(nested_analysis.bytes_accessed(*nested_root),
fusion_analysis.bytes_accessed(*fusion_root));
}
TEST_F(FusionCostAnalysis, NestedFusionFeedsMultipleUsers) {
absl::string_view hlo_text = R"(
HloModule temp, is_scheduled=true
fused_computation.1 {
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
tmp_1 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0), kind=kLoop, calls=
{
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} add(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0)
}
tmp_2 = bf16[]{:T(256)} constant(0)
tmp_3 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} reduce-window(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[]{:T(256)} tmp_2), window={size=1x1x1x1023 pad=0_0x0_0x0_0x511_511}, to_apply=
{
tmp_0 = bf16[]{:T(256)} parameter(0)
tmp_1 = bf16[]{:T(256)} parameter(1)
ROOT tmp_2 = bf16[]{:T(256)} add(bf16[]{:T(256)} tmp_0, bf16[]{:T(256)} tmp_1)
}
ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} divide(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_3)
}
ENTRY temp {
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
ROOT result = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(tmp_0), kind=kLoop, calls=fused_computation.1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto fusion_module,
ParseAndReturnVerifiedModule(hlo_text));
HloCostAnalysis fusion_analysis(ShapeSize);
auto* fusion_root = fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis));
EXPECT_EQ(1073741824, fusion_analysis.bytes_accessed(*fusion_root));
}
TEST_F(FusionCostAnalysis, ParamFeedsNestedFusionAndTrivialUser) {
absl::string_view hlo_text = R"(
HloModule temp, is_scheduled=true
fused_computation.1 {
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
tmp_1 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0), kind=kLoop, calls=
{
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} add(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0)
}
tmp_2 = bf16[]{:T(256)} constant(0)
tmp_3 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} reduce-window(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[]{:T(256)} tmp_2), window={size=1x1x1x1023 pad=0_0x0_0x0_0x511_511}, to_apply=
{
tmp_0 = bf16[]{:T(256)} parameter(0)
tmp_1 = bf16[]{:T(256)} parameter(1)
ROOT tmp_2 = bf16[]{:T(256)} add(bf16[]{:T(256)} tmp_0, bf16[]{:T(256)} tmp_1)
}
ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} divide(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_3)
}
ENTRY temp {
tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0)
ROOT result = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(tmp_0), kind=kLoop, calls=fused_computation.1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto fusion_module,
ParseAndReturnVerifiedModule(hlo_text));
HloCostAnalysis fusion_analysis(ShapeSize);
auto* fusion_root = fusion_module->entry_computation()->root_instruction();
ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis));
EXPECT_EQ(1610612736, fusion_analysis.bytes_accessed(*fusion_root));
}
TEST_F(FusionCostAnalysis, LoopFusionTupleOutput) {
Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/0.0f, /*to=*/1.0f, /*rows=*/2, /*cols=*/2)));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/1.0f, /*to=*/2.0f, /*rows=*/2, /*cols=*/2)));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/2.0f, /*to=*/3.0f, /*rows=*/2, /*cols=*/2)));
auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({c1, c2}));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c2));
auto clamp = builder.AddInstruction(
HloInstruction::CreateTernary(r2f32, HloOpcode::kClamp, c2, add, add));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r2f32, HloOpcode::kExp, add));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, exp, c3));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kSubtract, mul, clamp));
auto tuple2 = builder.AddInstruction(
HloInstruction::CreateTuple({sub, sub, mul, tuple1}));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{tuple2, sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop);
HloCostAnalysis fusion_analysis(ShapeSize);
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
EXPECT_EQ(fusion_analysis.flop_count(), 16);
EXPECT_EQ(fusion_analysis.transcendental_count(), 4);
EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion),
sizeof(float) * (5 + 5) * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
sizeof(float) * 2 * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 3),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
sizeof(float) * 5 * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {0}),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {1}),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {2}),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3}),
sizeof(float) * 2 * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3, 0}),
sizeof(float) * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3, 1}),
sizeof(float) * 2 * 2);
}
TEST_F(FusionCostAnalysis, NoLayout) {
Shape shape_with_layout = ShapeUtil::MakeShape(F32, {2, 3, 4, 5});
Shape shape_without_layout = shape_with_layout;
shape_without_layout.clear_layout();
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5))));
auto c2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3})));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape_without_layout, c2, {1}));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
shape_with_layout, HloOpcode::kAdd, c1, broadcast));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{add, broadcast}, HloInstruction::FusionKind::kLoop);
HloCostAnalysis fusion_analysis(ShapeSize);
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
EXPECT_EQ(fusion_analysis.flop_count(), 120);
EXPECT_EQ(fusion_analysis.transcendental_count(), 0);
EXPECT_EQ(fusion_analysis.bytes_accessed(),
sizeof(float) * (2 * 3 * 4 * 5 + 3 + 2 * 3 * 4 * 5));
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
sizeof(float) * 2 * 3 * 4 * 5);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
sizeof(float) * 3);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
sizeof(float) * 2 * 3 * 4 * 5);
}
TEST_F(FusionCostAnalysis, NonTupleWithTupleParamBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
fused_computation {
param = (f32[3,2]{1,0}, f32[3,2]{1,0}) parameter(0)
gte0 = f32[3,2]{1,0} get-tuple-element(param), index=0
gte1 = f32[3,2]{1,0} get-tuple-element(param), index=1
ROOT add = f32[3,2]{1,0} add(gte0, gte1)
}
ENTRY entry {
param0 = f32[3,2]{1,0} parameter(0)
param1 = f32[3,2]{1,0} parameter(1)
tuple = (f32[3,2]{1,0}, f32[3,2]{1,0}) tuple(param0, param1)
ROOT fusion = f32[3,2]{1,0} fusion(tuple), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = module->entry_computation()->root_instruction();
HloCostAnalysis fusion_analysis(ShapeSize);
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion), sizeof(float) * 3 * 2 * 3);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
sizeof(float) * 3 * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
sizeof(float) * 3 * 2);
}
TEST_F(FusionCostAnalysis, TupleBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
fused_computation {
param = (f32[2,2]{1,0}, f32[2,2]{1,0}) parameter(0)
gte0 = f32[2,2]{1,0} get-tuple-element(param), index=0
gte1 = f32[2,2]{1,0} get-tuple-element(param), index=1
add = f32[2,2]{1,0} add(gte0, gte1)
mul = f32[2,2]{1,0} multiply(gte0, gte1)
ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(add, mul)
}
ENTRY entry {
param0 = f32[2,2]{1,0} parameter(0)
param1 = f32[2,2]{1,0} parameter(1)
tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(param0, param1)
ROOT fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(tuple), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = module->entry_computation()->root_instruction();
HloCostAnalysis fusion_analysis(ShapeSize);
ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion), sizeof(float) * 2 * 2 * 4);
EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
sizeof(float) * 2 * 2 * 2);
EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
sizeof(float) * 2 * 2 * 2);
}
TEST_F(FusionCostAnalysis, IgnoreUnusedParameterShape) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = (s8[3], s8[100]) parameter(0)
gte0 = s8[3] get-tuple-element(p0), index=0
c1 = s8[3] constant(0)
a1 = s8[3] add(gte0, c1)
ROOT r1 = s8[3] add(a1, c1)
}
ENTRY e {
param0 = (s8[3], s8[100]) parameter(0)
ROOT r0 = s8[3] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(root->Accept(&analysis));
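// Only the used s8[3] tuple element plus the tuple's two pointers are
// charged; the unused s8[100] element contributes nothing.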
EXPECT_EQ(analysis.output_bytes_accessed(*root), 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 2 * kPointerSize + 3);
EXPECT_EQ(analysis.bytes_accessed(*root), 2 * kPointerSize + 3 + 3 + 3);
EXPECT_EQ(analysis.bytes_accessed(), 2 * kPointerSize + 3 + 3 + 3);
}
TEST_F(FusionCostAnalysis, InfeedOutfeed) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
after-all = token[] after-all()
infeed = ((f32[2,3]{1,0}), token[]) infeed(after-all)
gte0 = (f32[2,3]{1,0}) get-tuple-element(infeed), index=0
gte1 = f32[2,3]{1,0} get-tuple-element(gte0), index=0
add = f32[2,3]{1,0} add(gte1, gte1)
tuple = (f32[2,3]{1,0}) tuple(add)
tok = token[] get-tuple-element(infeed), index=1
ROOT outfeed = token[] outfeed(tuple, tok)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* infeed =
module->entry_computation()->GetInstructionWithName("infeed");
HloInstruction* outfeed =
module->entry_computation()->GetInstructionWithName("outfeed");
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(infeed->Accept(&analysis));
ASSERT_IS_OK(outfeed->Accept(&analysis));
EXPECT_EQ(analysis.bytes_accessed(*infeed), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*infeed, 0), 0);
EXPECT_EQ(analysis.output_bytes_accessed(*infeed), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.bytes_accessed(*outfeed), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*outfeed, 0),
sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.output_bytes_accessed(*outfeed), 0);
}
TEST_F(FusionCostAnalysis, AllReduceTupleBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
sum {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[2,2]{1,0} parameter(0)
param1 = f32[2,2]{1,0} parameter(1)
ROOT all-reduce = (f32[2,2]{1,0}, f32[2,2]{1,0}) all-reduce(param0, param1), replica_groups={{0,1}}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* all_reduce = module->entry_computation()->root_instruction();
HloCostAnalysis all_reduce_analysis(ShapeSize);
ASSERT_IS_OK(all_reduce->Accept(&all_reduce_analysis));
EXPECT_EQ(all_reduce_analysis.bytes_accessed(*all_reduce),
sizeof(float) * 2 * 2 * 4);
EXPECT_EQ(all_reduce_analysis.operand_bytes_accessed(*all_reduce, 0),
sizeof(float) * 2 * 2);
EXPECT_EQ(all_reduce_analysis.operand_bytes_accessed(*all_reduce, 1),
sizeof(float) * 2 * 2);
EXPECT_EQ(all_reduce_analysis.output_bytes_accessed(*all_reduce),
sizeof(float) * 2 * 2 * 2);
}
TEST_F(HloCostAnalysisTest, TupleCost) {
HloCostAnalysis analysis(ShapeSize);
XlaBuilder builder("tuple");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {123}), "x");
auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {42}), "y");
Tuple(&builder, {x, y});
auto hlo_module = BuildHloGraph(&builder);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 0);
EXPECT_EQ(analysis.transcendental_count(), 0);
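// A tuple only materializes pointers to its elements, so kPointerSize per
// element is charged and the operands themselves are not read.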
EXPECT_EQ(analysis.bytes_accessed(), kPointerSize * 2);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 0);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), 0);
EXPECT_EQ(analysis.output_bytes_accessed(*root), kPointerSize * 2);
}
using DomainCostAnalysis = HloTestBase;
TEST_F(DomainCostAnalysis, DomainCost) {
HloCostAnalysis analysis(ShapeSize);
HloComputation::Builder builder("domain");
auto x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {123}), "x"));
auto y = builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {42}), "y"));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({x, y}));
auto domain = builder.AddInstruction(
HloInstruction::CreateDomain(tuple->shape(), tuple, nullptr, nullptr));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
EXPECT_EQ(hlo_module->entry_computation()->root_instruction(), domain);
ASSERT_IS_OK(domain->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(*domain), 0);
EXPECT_EQ(analysis.transcendental_count(*domain), 0);
EXPECT_EQ(analysis.bytes_accessed(*domain), 0);
}
TEST_F(HloCostAnalysisTest, BaseDilatedConvolution) {
XlaBuilder builder("BaseDilatedConvolution");
auto input = Parameter(
&builder, 0,
ShapeUtil::MakeShape(F32, {/*batch=*/1, /*feature=*/1, /*y=*/10, /*x=*/20}),
"input");
auto kernel = Parameter(
&builder, 1,
ShapeUtil::MakeShape(F32, {/*output_feature=*/1, /*input_feature=*/1, /*y=*/3, /*x=*/3}),
"kernel");
ConvGeneralDilated(input, kernel, /*window_strides=*/{1, 1},
/*padding=*/{{1, 1}, {1, 1}},
/*lhs_dilation=*/{3, 5}, /*rhs_dilation=*/{7, 11},
XlaBuilder::CreateDefaultConvDimensionNumbers(2));
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.flop_count(), 1472);
}
TEST_F(HloCostAnalysisTest, Slice) {
XlaBuilder builder("slice");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
Slice(x, {0}, {1}, {1});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.bytes_accessed(), 8);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float));
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float));
}
TEST_F(HloCostAnalysisTest, DynamicSlice) {
XlaBuilder builder("dynamic-slice");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
DynamicSlice(x, absl::Span<const XlaOp>({ConstantR0<int32_t>(&builder, 1)}),
{1});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
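// One float element is read and one written (8 bytes), plus the 4-byte
// int32 start index.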
EXPECT_EQ(analysis.bytes_accessed(), 8 + 4);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float));
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t));
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float));
}
TEST_F(HloCostAnalysisTest, DynamicUpdateSlice) {
XlaBuilder builder("dynamic-update-slice");
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x");
DynamicUpdateSlice(
x, ConstantR1<float>(&builder, {1.0}),
absl::Span<const XlaOp>({ConstantR0<int32_t>(&builder, 1)}));
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.bytes_accessed(), 8 + 4);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
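// Dynamic-update-slice modifies its first operand in place: only the
// one-element update and the int32 index are charged.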
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 0);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float));
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(int32_t));
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float));
}
TEST_F(HloCostAnalysisTest, Gather) {
XlaBuilder builder("gather");
Shape operand_shape = ShapeUtil::MakeShape(S32, {3, 3});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto operand = Parameter(&builder, 0, operand_shape, "operand");
auto indices = Parameter(&builder, 1, indices_shape, "indices");
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_collapsed_slice_dims(0);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
Gather(operand, indices, dim_numbers, {1, 3});
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
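// 56 bytes: the two gathered rows of three 4-byte elements are read and
// written (2 * 24), plus the two int32 indices (8).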
EXPECT_EQ(analysis.bytes_accessed(), 56);
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 3);
}
TEST_F(HloCostAnalysisTest, Scatter) {
XlaBuilder builder("scatter");
Shape operand_shape = ShapeUtil::MakeShape(F32, {3, 3});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
Shape values_shape = ShapeUtil::MakeShape(F32, {2, 3});
auto operand = Parameter(&builder, 0, operand_shape, "operand");
auto indices = Parameter(&builder, 1, indices_shape, "indices");
auto values = Parameter(&builder, 2, values_shape, "values");
ScatterDimensionNumbers dim_numbers;
dim_numbers.set_index_vector_dim(1);
dim_numbers.add_update_window_dims(1);
dim_numbers.add_inserted_window_dims(0);
dim_numbers.add_scatter_dims_to_operand_dims(0);
Scatter(operand, indices, values, add_, dim_numbers);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
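// 4 bytes each for the two indices and for the three 2x3 arrays involved:
// the scattered-into rows read, the updates read, and the result written.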
EXPECT_EQ(analysis.bytes_accessed(), 4 * (2 + 3 * (2 * 3)));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 3);
}
TEST_F(HloCostAnalysisTest, MultioutputScatter) {
XlaBuilder builder("scatter");
Shape operand0_shape = ShapeUtil::MakeShape(F32, {3, 3});
Shape operand1_shape = ShapeUtil::MakeShape(S32, {3, 3});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
Shape values0_shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape values1_shape = ShapeUtil::MakeShape(S32, {2, 3});
auto operand0 = Parameter(&builder, 0, operand0_shape, "operand0");
auto operand1 = Parameter(&builder, 1, operand1_shape, "operand1");
auto indices = Parameter(&builder, 2, indices_shape, "indices");
auto values0 = Parameter(&builder, 3, values0_shape, "values0");
auto values1 = Parameter(&builder, 4, values1_shape, "values1");
ScatterDimensionNumbers dim_numbers;
dim_numbers.set_index_vector_dim(1);
dim_numbers.add_update_window_dims(1);
dim_numbers.add_inserted_window_dims(0);
dim_numbers.add_scatter_dims_to_operand_dims(0);
auto add = [] {
XlaBuilder builder("add");
auto x0 = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x0");
auto x1 = Parameter(&builder, 1, ShapeUtil::MakeShape(S32, {}), "x1");
auto y0 = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {}), "y0");
auto y1 = Parameter(&builder, 3, ShapeUtil::MakeShape(S32, {}), "y1");
Tuple(&builder, {Add(x0, y0), Add(x1, y1)});
auto computation_status = builder.Build();
TF_CHECK_OK(computation_status.status());
return std::move(computation_status).value();
}();
Scatter({operand0, operand1}, indices, {values0, values1}, add, dim_numbers);
auto hlo_module = BuildHloGraph(&builder);
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(
hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
EXPECT_EQ(analysis.bytes_accessed(), 4 * (2 + 2 * 3 * (2 * 3)));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(int32_t) * 2);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 3), sizeof(float) * 2 * 3);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 4), sizeof(int32_t) * 2 * 3);
EXPECT_EQ(analysis.output_bytes_accessed(*root), 2 * sizeof(float) * 2 * 3);
}
TEST_F(HloCostAnalysisTest, GetShapeSizeIgnoreUnsupportedShape) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
*shape.mutable_layout() =
LayoutUtil::MakeLayout({1, 0}, {DIM_DENSE, DIM_COMPRESSED});
HloCostAnalysis analysis(ShapeSize);
EXPECT_TRUE(LayoutUtil::IsSparseArray(shape));
EXPECT_EQ(0, analysis.GetShapeSize(shape));
}
TEST_F(FusionCostAnalysis, Broadcast) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[] parameter(0)
c1 = s8[] constant(0)
a1 = s8[] add(p0, c1)
b1 = s8[10000] broadcast(a1), dimensions={}
b2 = s8[10000] broadcast(c1), dimensions={}
ROOT r1 = s8[10000] add(b1, b2)
}
ENTRY e {
param0 = s8[] parameter(0)
ROOT r0 = s8[10000] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
HloCostAnalysis analysis(ShapeSize);
ASSERT_IS_OK(root->Accept(&analysis));
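// The scalar parameter is read only once (1 byte) no matter how widely it is
// broadcast inside the fusion.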
EXPECT_EQ(analysis.output_bytes_accessed(*root), 10000);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis.bytes_accessed(*root), 10000 + 1);
EXPECT_EQ(analysis.bytes_accessed(), 10000 + 1);
}
TEST_F(FusionCostAnalysis, RevisitModifiedFusion) {
Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2});
HloComputation::Builder builder(TestName());
HloInstruction* c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2F32Linspace(/*from=*/0.0f, /*to=*/1.0f, /*rows=*/2, /*cols=*/2)));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c1));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, add, add));
HloInstruction* neg = builder.AddInstruction(
HloInstruction::CreateUnary(r2f32, HloOpcode::kNegate, mul));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{neg, mul, add}, HloInstruction::FusionKind::kLoop);
HloCostAnalysis::Options options{ShapeSize};
HloCostAnalysis analysis(options);
ASSERT_IS_OK(fusion->Accept(&analysis));
constexpr int64_t bytes_accessed = sizeof(float) * 2 * 2 * 2;
static_assert(bytes_accessed == 32, "");
EXPECT_EQ(analysis.flop_count(), 4 * 3);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed);
EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2);
EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2);
ASSERT_IS_OK(analysis.RevisitInstruction(fusion));
EXPECT_EQ(analysis.flop_count(), 4 * 3);
EXPECT_EQ(analysis.transcendental_count(), 0);
EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed);
EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2);
EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2);
HloComputation* fused_computation = fusion->fused_instructions_computation();
HloInstruction* to_replace = fused_computation->root_instruction();
HloInstruction* exp =
fused_computation->AddInstruction(HloInstruction::CreateUnary(
r2f32, HloOpcode::kExp, to_replace->mutable_operand(0)));
ASSERT_IS_OK(fused_computation->ReplaceInstruction(to_replace, exp));
ASSERT_IS_OK(module->Verify());
ASSERT_IS_OK(analysis.RevisitInstruction(fusion));
EXPECT_EQ(analysis.flop_count(), 4 * 2);
EXPECT_EQ(analysis.transcendental_count(), 4);
EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed);
EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2);
EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2);
}
TEST_F(FusionCostAnalysis, RevisitAlteredFusion) {
absl::string_view hlo_string = R"(
HloModule m
f {
fp0 = s8[10] parameter(0)
ROOT fr = s8[1] slice(fp0), slice={[0:1]}
}
ENTRY e {
p0 = s8[10] parameter(0)
ROOT r = s8[1] fusion(p0), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
HloCostAnalysis modified_analysis(ShapeSize);
ASSERT_IS_OK(root->Accept(&modified_analysis));
HloInstruction* fusion_root =
root->called_computations()[0]->root_instruction();
EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0), 0.1);
fusion_root->mutable_slice_limits()->at(0) = 2;
fusion_root->mutable_shape()->mutable_dimensions()[0] = 2;
root->mutable_shape()->mutable_dimensions()[0] = 2;
module->mutable_config().SetDefaultComputationLayout(
module->entry_computation()->ComputeProgramShape());
ASSERT_IS_OK(modified_analysis.RevisitInstruction(root));
HloCostAnalysis unmodified_analysis(ShapeSize);
ASSERT_IS_OK(root->Accept(&unmodified_analysis));
EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0), 0.2);
EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0),
unmodified_analysis.operand_utilization(*fusion_root, 0));
}
TEST_F(FusionCostAnalysis, RevisitWithSharedComputation) {
absl::string_view hlo_string = R"(
HloModule m
add_computation {
arg_0.1 = f32[] parameter(0)
arg_1.1 = f32[] parameter(1)
ROOT r = f32[] add(arg_0.1, arg_1.1)
}
ENTRY e {
p0 = f32[127,125] parameter(0)
p1 = f32[127,125] parameter(1)
constant_zero = f32[] constant(0)
r0 = f32[127] reduce(p0, constant_zero), dimensions={1}, to_apply=add_computation
r1 = f32[127] reduce(p0, constant_zero), dimensions={1}, to_apply=add_computation
ROOT _ = f32[127] add(r0, r1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
HloCostAnalysis analysis(ShapeSize);
HloInstruction* add_root =
root->operand(1)->called_computations()[0]->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis));
EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1);
ASSERT_IS_OK(analysis.RemoveInstruction(root->mutable_operand(0)));
EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1);
ASSERT_IS_OK(analysis.RevisitInstruction(root->mutable_operand(0)));
EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1);
}
using Properties = HloCostAnalysis::Properties;
constexpr auto kFlopsKey = HloCostAnalysis::kFlopsKey;
constexpr auto kTranscendentalsKey = HloCostAnalysis::kTranscendentalsKey;
constexpr auto kBytesAccessedKey = HloCostAnalysis::kBytesAccessedKey;
constexpr auto kOptimalSecondsKey = HloCostAnalysis::kOptimalSecondsKey;
constexpr auto kUtilizationKey = HloCostAnalysis::kUtilizationKey;
constexpr auto kReserved0Key = HloCostAnalysis::kReserved0Key;
constexpr auto kReserved1Key = HloCostAnalysis::kReserved1Key;
TEST(HloCostAnalysisProperties, ZeroWhenInitialized) {
Properties p;
EXPECT_EQ(0, p[kFlopsKey]);
EXPECT_EQ(0, p[kTranscendentalsKey]);
EXPECT_EQ(0, p[kBytesAccessedKey]);
EXPECT_EQ(0, p[kOptimalSecondsKey]);
EXPECT_EQ(0, p[kUtilizationKey]);
EXPECT_EQ(0, p[kReserved0Key]);
EXPECT_EQ(0, p[kReserved1Key]);
EXPECT_EQ(0, p.operand_utilization(0, {}));
EXPECT_EQ(0, p.operand_utilization(1, {}));
EXPECT_EQ(0, p.operand_utilization(2, {}));
EXPECT_EQ(0, p.operand_utilization(0, {0}));
EXPECT_EQ(0, p.operand_utilization(2, {0}));
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(0, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(1, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(2, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(0, {0})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(2, {0})]);
EXPECT_EQ(0, p.operand_bytes_accessed(0, {}));
EXPECT_EQ(0, p.operand_bytes_accessed(1, {}));
EXPECT_EQ(0, p.operand_bytes_accessed(2, {}));
EXPECT_EQ(0, p.operand_bytes_accessed(0, {0}));
EXPECT_EQ(0, p.operand_bytes_accessed(2, {0}));
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(1, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {0})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {0})]);
EXPECT_EQ(0, p.output_bytes_accessed({}));
EXPECT_EQ(0, p.output_bytes_accessed({0}));
EXPECT_EQ(0, p[HloCostAnalysis::GetOutputBytesAccessedKey({})]);
EXPECT_EQ(0, p[HloCostAnalysis::GetOutputBytesAccessedKey({0})]);
EXPECT_EQ(0, p["foobar"]);
std::vector<std::pair<std::string, float>> vals;
Properties().ForEach([&](absl::string_view key, float val) {
vals.push_back({std::string(key), val});
});
EXPECT_THAT(vals, ::testing::IsEmpty());
}
TEST(HloCostAnalysisProperties, SetValues) {
Properties p;
p[kFlopsKey] = 1;
p[kTranscendentalsKey] = 2;
p[kBytesAccessedKey] = 3;
p[kOptimalSecondsKey] = 4;
p[kUtilizationKey] = 5;
p[kReserved0Key] = 6;
p[kReserved1Key] = 7;
EXPECT_EQ(1, p[kFlopsKey]);
EXPECT_EQ(2, p[kTranscendentalsKey]);
EXPECT_EQ(3, p[kBytesAccessedKey]);
EXPECT_EQ(4, p[kOptimalSecondsKey]);
EXPECT_EQ(5, p[kUtilizationKey]);
EXPECT_EQ(6, p[kReserved0Key]);
EXPECT_EQ(7, p[kReserved1Key]);
p.set_operand_utilization(0, {}, 10);
p.set_operand_utilization(1, {}, 11);
p.set_operand_utilization(2, {}, 12);
p.set_operand_utilization(0, {0}, 13);
p.set_operand_utilization(2, {0}, 14);
EXPECT_EQ(10, p.operand_utilization(0, {}));
EXPECT_EQ(11, p.operand_utilization(1, {}));
EXPECT_EQ(12, p.operand_utilization(2, {}));
EXPECT_EQ(13, p.operand_utilization(0, {0}));
EXPECT_EQ(14, p.operand_utilization(2, {0}));
EXPECT_EQ(10, p[HloCostAnalysis::GetOperandUtilizationKey(0, {})]);
EXPECT_EQ(11, p[HloCostAnalysis::GetOperandUtilizationKey(1, {})]);
EXPECT_EQ(12, p[HloCostAnalysis::GetOperandUtilizationKey(2, {})]);
EXPECT_EQ(13, p[HloCostAnalysis::GetOperandUtilizationKey(0, {0})]);
EXPECT_EQ(14, p[HloCostAnalysis::GetOperandUtilizationKey(2, {0})]);
p.set_operand_bytes_accessed(0, {}, 20);
p.set_operand_bytes_accessed(1, {}, 21);
p.set_operand_bytes_accessed(2, {}, 22);
p.set_operand_bytes_accessed(0, {0}, 23);
p.set_operand_bytes_accessed(2, {0}, 24);
EXPECT_EQ(20, p.operand_bytes_accessed(0, {}));
EXPECT_EQ(21, p.operand_bytes_accessed(1, {}));
EXPECT_EQ(22, p.operand_bytes_accessed(2, {}));
EXPECT_EQ(23, p.operand_bytes_accessed(0, {0}));
EXPECT_EQ(24, p.operand_bytes_accessed(2, {0}));
EXPECT_EQ(20, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {})]);
EXPECT_EQ(21, p[HloCostAnalysis::GetOperandBytesAccessedKey(1, {})]);
EXPECT_EQ(22, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {})]);
EXPECT_EQ(23, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {0})]);
EXPECT_EQ(24, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {0})]);
p.set_output_bytes_accessed({}, 30);
p.set_output_bytes_accessed({0}, 31);
EXPECT_EQ(30, p.output_bytes_accessed({}));
EXPECT_EQ(31, p.output_bytes_accessed({0}));
EXPECT_EQ(30, p[HloCostAnalysis::GetOutputBytesAccessedKey({})]);
EXPECT_EQ(31, p[HloCostAnalysis::GetOutputBytesAccessedKey({0})]);
p["foo"] = 100;
EXPECT_EQ(100, p["foo"]);
p["bar"] += 101;
EXPECT_EQ(101, p["bar"]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
794dc36a-5c72-49a9-bb2c-dbc91bcd6fe0 | cpp | tensorflow/tensorflow | hlo_computation_deduplicator | third_party/xla/xla/service/hlo_computation_deduplicator.cc | third_party/xla/xla/service/hlo_computation_deduplicator_test.cc | #include "xla/service/hlo_computation_deduplicator.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
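// Returns true if the constants in `comp` hold more than 1024 bytes of
// array data in total.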
bool HloComputationDeduplicator::ContainsLargeConstants(HloComputation* comp) {
int total_size = 0;
for (HloInstruction* instruction : comp->instructions()) {
if (instruction->IsConstant()) {
total_size += ShapeUtil::ArrayDataSize(instruction->literal().shape());
if (total_size > 1024) {
return true;
}
}
}
return false;
}
absl::StatusOr<bool> HloComputationDeduplicator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
absl::flat_hash_map<std::string, HloComputation*> unique_comps;
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
HloPrintOptions options = HloPrintOptions::Canonical();
options.set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff);
options.set_print_infeed_outfeed_config(false);
options.set_print_only_essential_constants(true);
options.set_print_operand_shape(true);
options.set_print_ids(false);
options.set_canonicalize_computations(true);
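  // Treat two computations as equal if either one is already slated to be
  // replaced by the other, or both map to the same replacement.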
auto comp_eq = [&replacement](const HloComputation* a,
const HloComputation* b) {
if (a->unique_id() == b->unique_id()) return true;
if (replacement.contains(a) &&
replacement.at(a)->unique_id() == b->unique_id()) {
return true;
}
if (replacement.contains(b) &&
replacement.at(b)->unique_id() == a->unique_id()) {
return true;
}
if (replacement.contains(a) && replacement.contains(b) &&
replacement.at(a)->unique_id() == replacement.at(b)->unique_id()) {
return true;
}
return false;
};
for (HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
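    // Skip entry computations, computations too large to stringify cheaply,
    // computations holding large constants, and computations called by
    // collectives.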
if (comp->IsEntryComputation() || comp->instruction_count() > 128 ||
ContainsLargeConstants(comp) || comp->IsCollectiveCalledComputation()) {
continue;
}
std::string comp_str = comp->ToString(options);
auto poss_dup = unique_comps.find(comp_str);
if (poss_dup != unique_comps.end() &&
        poss_dup->second->Equal(*comp, /*is_layout_sensitive=*/true,
                                comp_eq)) {
VLOG(2) << "Replacing " << comp->name() << " with "
<< poss_dup->second->name();
replacement[comp] = poss_dup->second;
} else {
unique_comps[std::move(comp_str)] = comp;
}
}
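  // Either just annotate the duplicate fusion computations or rewrite all
  // callers to use the canonical computations.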
if (mark_fusion_duplications_) {
module->MarkFusionDuplications(replacement);
} else {
module->ReplaceComputations(replacement);
}
return !replacement.empty();
}
} | #include "xla/service/hlo_computation_deduplicator.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class HloComputationDeduplicatorTest : public HloTestBase {
protected:
std::vector<std::string> RunDeduplicatePass(const std::string_view text,
bool expect_true) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(text).value();
HloComputationDeduplicator dedup;
bool changed = dedup.Run(module.get()).value();
EXPECT_EQ(changed, expect_true);
std::vector<std::string> computation_names;
for (auto comp : module->computations()) {
computation_names.emplace_back(comp->name());
}
return computation_names;
}
};
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBandC) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0}, s32[20]{0})->s32[]}
region_A {
Arg_0.6 = s32[] parameter(0)
Arg_1.7 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.6, Arg_1.7)
}
region_B {
Arg_0.11 = s32[] parameter(0)
Arg_1.12 = s32[] parameter(1)
ROOT add.13 = s32[] add(Arg_0.11, Arg_1.12)
}
region_C {
Arg_0.17 = s32[] parameter(0)
Arg_1.18 = s32[] parameter(1)
ROOT add.19 = s32[] add(Arg_0.17, Arg_1.18)
}
ENTRY main.22 {
Arg_0.1 = s32[10]{0} parameter(0)
Arg_1.2 = s32[15]{0} parameter(1)
Arg_2.3 = s32[20]{0} parameter(2)
constant.4 = s32[] constant(0)
reduce.9 = s32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=region_A
reduce.14 = s32[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B
reduce.20 = s32[] reduce(Arg_2.3, constant.4), dimensions={0}, to_apply=region_C
multiply.15 = s32[] multiply(reduce.9, reduce.14)
ROOT multiply.21 = s32[] multiply(multiply.15, reduce.20)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
EXPECT_NE(name, "region_C");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBExactCopy) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionsWithSameSubcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_X {
Ag_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT their_sum = s32[] add(Ag_0, Arg_1)
}
region_Y {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT the_sum = s32[] add(Arg_0, Arg_1)
}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.16 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.17 {
Arg_0 = s32[10]{0} parameter(0)
Arg_1 = s32[15]{0} parameter(1)
rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15
rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16
ROOT ret = add(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
EXPECT_NE(name, "region_A");
EXPECT_NE(name, "region_Y");
EXPECT_NE(name, "main.16");
}
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionsWithDifferentSubcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_X {
Ag_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT their_sum = s32[] multiply(Ag_0, Arg_1)
}
region_Y {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT the_sum = s32[] add(Arg_0, Arg_1)
}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.16 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.17 {
Arg_0 = s32[10]{0} parameter(0)
Arg_1 = s32[15]{0} parameter(1)
rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15
rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16
ROOT ret = add(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
int region_x_count = 0;
int region_y_count = 0;
int main_16_count = 0;
int main_15_count = 0;
int region_a_count = 0;
int region_b_count = 0;
for (auto name : computation_names) {
region_x_count += (name == "region_X");
region_y_count += (name == "region_Y");
main_15_count += (name == "main.15");
main_16_count += (name == "main.16");
region_a_count += (name == "region_A");
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_a_count, 0);
EXPECT_EQ(region_b_count, 0);
EXPECT_EQ(main_15_count, 1);
EXPECT_EQ(main_16_count, 1);
EXPECT_EQ(region_x_count, 1);
EXPECT_EQ(region_y_count, 1);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBVarDifferences) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBCommutative) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_1, Arg_0)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto name : computation_names) {
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest,
DontRemoveRegionBDifferentExecutionThread) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add = s32[] add(Arg_0, Arg_1)
}
region_B {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add = s32[] add(Arg_0, Arg_1)
}
called_computation {
Arg_0 = s32[15]{0} parameter(0)
Cst = s32[] constant(0)
ROOT rd2 = s32[] reduce(Arg_0, Cst), dimensions={0}, to_apply=region_B
}, execution_thread="parallel_thread"
ENTRY main.15 {
Arg_0 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0, constant.3), dimensions={0}, to_apply=region_A
Arg_1 = s32[15]{0} parameter(1)
call-start = ((s32[15]{0}), s32[], s32[]) call-start(Arg_1),
async_execution_thread="parallel_thread",
to_apply=%called_computation
call-done = s32[] call-done(call-start)
ROOT multiply.14 = s32[] multiply(rd1, call-done)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto name : computation_names) {
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 5);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionLargeConstant) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_00 = s32[] parameter(0)
Arg_1_1 = s32[] parameter(1)
Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
add1 = s32[10, 10] add(Arg_1, Arg_0)
add2 = s32[10, 10] add(Arg_2, Arg_3)
add3 = s32[10, 10] add(Arg_4, Arg_5)
add8 = s32[10, 10] add(add1, add2)
addv = s32[10, 10] add(add3, add8)
ROOT ret = add(Arg_00, Arg_1_1)
}
region_B {
Arg_00 = s32[] parameter(0)
Arg_1_1 = s32[] parameter(1)
Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
add1 = s32[10, 10] add(Arg_1, Arg_0)
add2 = s32[10, 10] add(Arg_2, Arg_3)
add3 = s32[10, 10] add(Arg_4, Arg_5)
add8 = s32[10, 10] add(add1, add2)
addv = s32[10, 10] add(add3, add8)
ROOT ret = add(Arg_00, Arg_1_1)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto comp : computation_names) {
region_b_count += (comp == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto name : computation_names) {
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentType) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s16[15]{0})->s16[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s16[] parameter(0)
Arg_1.6 = s16[] parameter(1)
ROOT add.7 = s16[] multiply(Arg_0.5, Arg_1.6)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(5)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s16[15]{0} parameter(1)
constant.4 = s16[] constant(5)
rd2 = s16[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto comp : computation_names) {
region_b_count += (comp == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBEntryComp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A1 {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B1 {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY region_B {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
region_A {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
EXPECT_EQ(computation_names.size(), 4);
}
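// Each region computation has two parameters plus 128 adds, putting it over
// the pass's 128-instruction limit, so nothing may be deduplicated.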
TEST_F(HloComputationDeduplicatorTest, LargeSubComputationTest) {
const Shape shape = ShapeUtil::MakeScalarShape(S32);
const int total_regions = 2;
const int max_insns = 128;
std::vector<HloComputation> comps;
auto module = CreateNewVerifiedModule();
for (int region = 0; region < total_regions; region++) {
HloComputation::Builder builder("region_" + std::to_string(region));
auto curr =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a0"));
auto next =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "a1"));
for (int i = 0; i < max_insns; i++) {
next = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, curr, next));
}
module->AddComputationAndUnifyNamesAndIds(builder.Build(), false);
}
HloComputation::Builder main("main_func");
std::vector<HloInstruction *> insns;
std::vector<HloInstruction *> consts;
for (int region = 0; region < total_regions; region++) {
insns.push_back(main.AddInstruction(
HloInstruction::CreateParameter(region, ShapeUtil::MakeShape(S32, {10}),
"a" + std::to_string(region))));
consts.push_back(main.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(5 + region))));
}
int region = 0;
for (auto comp : module->computations()) {
ASSERT_LT(region, total_regions);
    main.AddInstruction(HloInstruction::CreateReduce(
        ShapeUtil::MakeScalarShape(S32), insns[region], consts[region],
        /*dimensions_to_reduce=*/{0}, comp));
    region++;
}
module->AddEntryComputation(main.Build());
HloComputationDeduplicator dedup;
TF_ASSERT_OK_AND_ASSIGN(bool changed, dedup.Run(module.get()));
EXPECT_FALSE(changed);
std::vector<HloComputation *> computations = module->MakeComputationSorted();
EXPECT_EQ(computations.size(), (total_regions + 1));
}
TEST_F(HloComputationDeduplicatorTest, DontDeduplicateReduceAllReduce) {
const std::string_view text = R"(
HloModule TestModule
add.1 {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add.2 = s32[] add(Arg_0, Arg_1)
}
add.2 {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add.2 = s32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0.1 = s32[10] parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=add.1
Arg_1.1 = s32[] parameter(1)
rd2 = s32[] all-reduce(Arg_1.1), to_apply=add.2
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
EXPECT_EQ(computation_names.size(), 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_computation_deduplicator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_computation_deduplicator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bdb04ae8-b394-4376-8869-cbfb0c452ed3 | cpp | tensorflow/tensorflow | gather_simplifier | third_party/xla/xla/service/gather_simplifier.cc | third_party/xla/xla/service/gather_simplifier_test.cc | #include "xla/service/gather_simplifier.h"
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
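// Canonicalizes a gather: transposes the operand so that start_index_map
// becomes the identity, flattens the start indices to 2D with
// index_vector_dim=1, gathers without collapsed slice dimensions, and then
// transposes/reshapes the result back into the original output shape.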
absl::StatusOr<HloInstruction*> GatherSimplifier::ExpandInstruction(
HloInstruction* inst) {
auto* gather = DynCast<HloGatherInstruction>(inst);
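  // A zero slice size means the gather reads no data; replace it with a
  // broadcast of zero in the gather's output shape.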
if (absl::c_linear_search(gather->gather_slice_sizes(), 0)) {
auto* zero = gather->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(gather->shape().element_type())));
return gather->AddInstruction(
HloInstruction::CreateBroadcast(gather->shape(), zero, {}));
}
const auto& dims = gather->gather_dimension_numbers();
int operand_rank =
dims.collapsed_slice_dims().size() + dims.offset_dims().size();
auto [operand_permutation, operand_permutation_inverse] =
MakeOperandStartIndexPermutations(dims.start_index_map(), operand_rank);
auto* operand = gather->operands()[0];
auto* start_indices = gather->operands()[1];
TF_ASSIGN_OR_RETURN(operand, MaybeTranspose(operand, operand_permutation));
TF_ASSIGN_OR_RETURN(
start_indices,
TransformStartIndices(start_indices, dims.index_vector_dim()));
auto slice_sizes = Permute(gather->gather_slice_sizes(), operand_permutation);
std::vector<int64_t> output_dims = {start_indices->shape().dimensions(0)};
absl::c_copy(slice_sizes, std::back_inserter(output_dims));
Shape output_shape =
ShapeUtil::MakeShape(operand->shape().element_type(), output_dims);
std::vector<int64_t> offset_dims(operand_rank);
absl::c_iota(offset_dims, 1);
std::vector<int64_t> start_index_map(dims.start_index_map().size());
absl::c_iota(start_index_map, 0);
auto* result = gather->AddInstruction(HloInstruction::CreateGather(
output_shape, operand, start_indices,
HloGatherInstruction::MakeGatherDimNumbers(
offset_dims,
          /*collapsed_slice_dims=*/{}, start_index_map,
          /*index_vector_dim=*/1),
slice_sizes, gather->indices_are_sorted()));
  std::vector<int64_t> output_permutation(1 + operand_rank);
absl::c_transform(operand_permutation_inverse, output_permutation.begin() + 1,
[](int64_t dim) { return dim + 1; });
TF_ASSIGN_OR_RETURN(result, MaybeTranspose(result, output_permutation));
if (!dims.collapsed_slice_dims().empty()) {
std::vector<int64_t> collapsed_slice_dims(
dims.collapsed_slice_dims().size());
absl::c_transform(dims.collapsed_slice_dims(), collapsed_slice_dims.begin(),
[](int64_t dim) { return dim + 1; });
TF_ASSIGN_OR_RETURN(result,
ElideDegenerateDims(result, collapsed_slice_dims));
}
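  // Undo the flattening of the start indices: expand the leading batch
  // dimension back into the original start-index dimensions (excluding the
  // index vector dimension).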
auto original_start_index_dims = gather->operands()[1]->shape().dimensions();
std::vector<int64_t> start_indices_dims;
for (int i = 0; i < original_start_index_dims.size(); ++i) {
if (i != dims.index_vector_dim()) {
start_indices_dims.push_back(original_start_index_dims[i]);
}
}
if (start_indices_dims.size() > 1) {
TF_ASSIGN_OR_RETURN(result,
ExpandFirstDimIntoNDims(result, start_indices_dims));
} else if (start_indices_dims.empty()) {
TF_ASSIGN_OR_RETURN(result, ElideDegenerateDims(result, {0}));
}
std::vector<int64_t> output_perm;
auto output_rank = static_cast<int64_t>(start_indices_dims.size() +
dims.offset_dims().size());
output_perm.reserve(output_rank);
auto offset_dim_index = static_cast<int64_t>(start_indices_dims.size());
int64_t start_index_dim_index = 0;
for (int64_t i = 0; i < output_rank; ++i) {
if (absl::c_linear_search(dims.offset_dims(), i)) {
output_perm.push_back(offset_dim_index++);
} else {
output_perm.push_back(start_index_dim_index++);
}
}
return MaybeTranspose(result, output_perm);
}
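// A gather is already canonical if its start indices are 2D with the index
// vector as dimension 1, start_index_map is the identity, no slice
// dimensions are collapsed, and the offset dimensions are contiguous
// starting at 1.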
bool GatherSimplifier::IsSimplifiedGather(const HloGatherInstruction* gather) {
auto* start_indices = gather->operands()[1];
const auto& dims = gather->gather_dimension_numbers();
return start_indices->shape().rank() == 2 && dims.index_vector_dim() == 1 &&
IsIdentityPermutation(dims.start_index_map()) &&
dims.collapsed_slice_dims().empty() &&
*dims.offset_dims().begin() == 1 &&
*dims.offset_dims().rbegin() == dims.offset_dims().size();
}
bool GatherSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
auto* gather = DynCast<HloGatherInstruction>(inst);
return gather && !IsSimplifiedGather(gather);
}
} | #include "xla/service/gather_simplifier.h"
#include <optional>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class GatherSimplifierTest : public HloTestBase {};
TEST_F(GatherSimplifierTest, TransformsStartIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34] parameter(0)
indices = s32[42,43] parameter(1)
ROOT gather = f32[42,43,7,8] gather(operand, indices),
offset_dims={2,3},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=2,
slice_sizes={7,8}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[VECTOR_DIM:.*]] = s32[42,43,1]{2,1,0} reshape(%indices)
CHECK: %[[INDICES_2D:.*]] = s32[1806,1]{1,0} reshape(%[[VECTOR_DIM]])
CHECK: %[[GATHER:.*]] = f32[1806,7,8]{{.*}} gather(
CHECK-SAME: %operand, %[[INDICES_2D]]),
CHECK-SAME: offset_dims={1,2},
CHECK-SAME: collapsed_slice_dims={},
CHECK-SAME: start_index_map={0},
CHECK-SAME: index_vector_dim=1,
CHECK-SAME: slice_sizes={7,8}
CHECK: ROOT %{{.*}} = f32[42,43,7,8]{3,2,1,0} reshape(%[[GATHER]])
)");
}
TEST_F(GatherSimplifierTest, RemovesCollapsedSliceDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34] parameter(0)
indices = s32[42,1] parameter(1)
ROOT gather = f32[42] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[GATHER:.*]] = f32[42,1,1]{2,1,0} gather(%operand, %indices)
CHECK-SAME: offset_dims={1,2},
CHECK-SAME: collapsed_slice_dims={},
CHECK: ROOT %{{.*}} = f32[42]{0} reshape(%[[GATHER]])
)");
}
TEST_F(GatherSimplifierTest, MakesStartIndexMapIdentity) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34,35] parameter(0)
indices = s32[42,3] parameter(1)
ROOT gather = f32[42,1,2,3] gather(operand, indices),
offset_dims={1,2,3},
collapsed_slice_dims={},
start_index_map={2,0,1},
index_vector_dim=1,
slice_sizes={1,2,3}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
%operand = f32[33,34,35]{2,1,0} parameter(0)
CHECK: %[[OPERAND:.*]] = f32[35,33,34]{2,1,0} transpose(%operand)
CHECK: %[[GATHER:.*]] = f32[42,3,1,2]{{.*}} gather(%[[OPERAND]],
CHECK-SAME: start_index_map={0,1,2},
CHECK: ROOT {{.*}} = f32[42,1,2,3]{{.*}} transpose(%[[GATHER]])
)");
}
TEST_F(GatherSimplifierTest, CollapsesSomeDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34,35] parameter(0)
indices = s32[42,1] parameter(1)
ROOT gather = f32[7,42] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={0,2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,7,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[GATHER:.*]] = f32[42,1,7,1]{3,2,1,0} gather(
CHECK: %[[COLLAPSED:.*]] = f32[42,7]{1,0} reshape(%[[GATHER]])
CHECK: ROOT {{.*}} = f32[7,42]{1,0} transpose(%[[COLLAPSED]]),
CHECK-SAME: dimensions={1,0}
)");
}
TEST_F(GatherSimplifierTest, ZeroDimStartIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[8,16] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = f32[8,16] gather(f32[8,16] operand, s32[2] indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={8,16}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: gather(
)");
}
TEST_F(GatherSimplifierTest, ZeroSizeSlice) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[0,2] parameter(0)
indices = s32[3] parameter(1)
ROOT gather = f32[3,2] gather(f32[0,2] operand, s32[3]{0} indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={0,2}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[ZERO:.*]] = f32[] constant(0)
CHECK: ROOT {{.*}} = f32[3,2]{1,0} broadcast(%[[ZERO]]), dimensions={}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d363942-2352-4331-8226-9a635b5b362f | cpp | tensorflow/tensorflow | collective_quantizer | third_party/xla/xla/service/collective_quantizer.cc | third_party/xla/xla/service/collective_quantizer_test.cc | #include "xla/service/collective_quantizer.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
namespace m = match;
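// Pieces of a matched (de)quantization pattern around a collective: the
// type conversion, the optional scaling multiply/divide and its broadcast
// scalar scale, the optional clamp, and any intervening unary
// data-movement ops.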
struct ConversionSubgraph {
HloInstruction* convert = nullptr;
HloInstruction* binary = nullptr;
HloInstruction* clamp = nullptr;
HloInstruction* scale_bcast = nullptr;
std::vector<HloInstruction*> unaries;
};
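// Matches a broadcast whose operand is a scalar.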
template <typename... Args>
auto ScalarBroadcast(Args... args) {
return m::Broadcast(args...).WithPredicate([](const HloInstruction* instr) {
return ShapeUtil::IsScalar(instr->operand(0)->shape());
});
}
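// Matches a bitcast that preserves the element type.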
auto BitcastPreservesElementType() {
return m::Bitcast().WithPredicate([](const HloInstruction* instr) {
return ShapeUtil::SameElementType(instr->shape(),
instr->operand(0)->shape());
});
}
auto ConvertToNarrowerType() {
auto converts_to_narrower_type = [](const HloInstruction* instr) -> bool {
return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) <
ShapeUtil::ByteSizeOfPrimitiveType(
instr->operand(0)->shape().element_type());
};
return m::Convert().WithPredicate(converts_to_narrower_type);
}
auto ConvertToWiderType() {
auto converts_to_wider_type = [](const HloInstruction* instr) -> bool {
return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) >
ShapeUtil::ByteSizeOfPrimitiveType(
instr->operand(0)->shape().element_type());
};
return m::Convert().WithPredicate(converts_to_wider_type);
}
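// The pass handles single-operand all-gather, all-to-all,
// collective-broadcast and collective-permute ops.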
bool IsSupportedCollective(HloInstruction* instr) {
return instr->operand_count() == 1 &&
(instr->opcode() == HloOpcode::kAllGather ||
instr->opcode() == HloOpcode::kAllToAll ||
instr->opcode() == HloOpcode::kCollectiveBroadcast ||
instr->opcode() == HloOpcode::kCollectivePermute);
}
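// Replays the given unary data-movement ops on `instr`, keeping each op's
// dimensions and layout but using `instr`'s element type.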
HloInstruction* ApplyUnaries(HloInstruction* instr,
const std::vector<HloInstruction*>& unaries) {
for (HloInstruction* unary : unaries) {
instr = instr->AddInstruction(unary->CloneWithNewOperands(
ShapeUtil::MakeShapeWithDenseLayout(
instr->shape().element_type(), unary->shape().dimensions(),
unary->shape().layout().minor_to_major()),
{instr}));
}
return instr;
}
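// Conservatively reports non-replicated when the module uses more than one
// replica, since the replication analysis below only models cross-partition
// (SPMD) replication.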
absl::StatusOr<bool> InstrIsReplicated(HloModule* module,
HloInstruction* instr) {
if (module->config().replica_count() > 1) {
return false;
}
  TF_ASSIGN_OR_RETURN(
      auto replication_analysis,
      HloReplicationAnalysis::Run(module,
                                  /*cross_partition_spmd=*/true));
return replication_analysis->HloInstructionIsReplicatedAt(instr, {});
}
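// Walks operand-ward from `instr` through unary ops, divides and
// multiplies, accumulating the visited instructions; returns the collected
// path once a convert to a wider type is reached, or an empty vector
// otherwise.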
std::vector<HloInstruction*> FindDequantizationSubgraphRecursive(
HloInstruction* instr, absl::flat_hash_set<int>& visited_instrs,
std::vector<HloInstruction*> subgraph) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return {};
}
subgraph.emplace_back(instr);
if (Match(instr, ConvertToWiderType())) {
return subgraph;
}
if (instr->operand_count() == 1 || instr->opcode() == HloOpcode::kDivide) {
return FindDequantizationSubgraphRecursive(instr->mutable_operand(0),
visited_instrs, subgraph);
} else if (instr->opcode() == HloOpcode::kMultiply) {
for (HloInstruction* operand : instr->unique_operands()) {
auto binary_subgraph = FindDequantizationSubgraphRecursive(
operand, visited_instrs, subgraph);
if (!binary_subgraph.empty()) {
return binary_subgraph;
}
}
}
return {};
}
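// Recognizes a dequantization feeding the collective operand: a widening
// convert, optionally multiplied or divided by a broadcast scalar scale,
// followed only by bitcast/copy/reshape/slice ops.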
std::optional<ConversionSubgraph> IsSupportedDequantization(
HloInstruction* instr) {
ConversionSubgraph subgraph;
absl::flat_hash_set<int> visited_instrs;
std::vector<HloInstruction*> candidate_subgraph =
FindDequantizationSubgraphRecursive(instr, visited_instrs,
std::vector<HloInstruction*>{});
std::reverse(candidate_subgraph.begin(), candidate_subgraph.end());
if (candidate_subgraph.size() > 1 &&
(Match(
candidate_subgraph[1],
m::MultiplyAnyOrder(&subgraph.binary, m::Convert(&subgraph.convert),
ScalarBroadcast(&subgraph.scale_bcast))) ||
Match(candidate_subgraph[1],
m::Divide(&subgraph.binary, m::Convert(&subgraph.convert),
ScalarBroadcast(&subgraph.scale_bcast))))) {
subgraph.unaries = {candidate_subgraph.begin() + 2,
candidate_subgraph.end()};
} else if (candidate_subgraph.size() > 0 &&
Match(candidate_subgraph[0], m::Convert(&subgraph.convert))) {
subgraph.unaries = {candidate_subgraph.begin() + 1,
candidate_subgraph.end()};
} else {
VLOG(5) << "Did not find type conversion or dequantization pattern.";
return std::nullopt;
}
for (HloInstruction* unary : subgraph.unaries) {
if (!Match(unary, m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(),
m::Reshape(), m::Slice()))) {
VLOG(5) << "Unexpected instruction in unary ops.";
return std::nullopt;
}
}
return std::make_optional<ConversionSubgraph>(std::move(subgraph));
}
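// Recognizes a quantization consuming the collective result: optional
// bitcast/copy/reshape/slice ops, then optionally a multiply or divide by a
// broadcast scalar scale clamped between broadcast constants, ending in a
// narrowing convert.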
std::optional<ConversionSubgraph> IsSupportedQuantization(
HloInstruction* instr) {
ConversionSubgraph subgraph;
std::vector<HloInstruction*> ops;
while (instr->user_count() <= 1) {
if (Match(instr, m::AnyOf<HloInstruction>(
BitcastPreservesElementType(), m::Copy(), m::Reshape(),
m::Slice(), m::Multiply(), m::Divide(), m::Clamp()))) {
if (instr->user_count() > 0) {
ops.emplace_back(instr);
instr = instr->users()[0];
continue;
}
break;
}
if (Match(instr, ConvertToNarrowerType())) {
ops.emplace_back(instr);
break;
}
VLOG(5) << "Unsupported instruction.";
return std::nullopt;
}
if (ops.size() > 2 &&
(Match(
ops.back(),
m::Convert(&subgraph.convert,
m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()),
m::MultiplyAnyOrder(
&subgraph.binary, m::Op(),
ScalarBroadcast(&subgraph.scale_bcast)),
ScalarBroadcast(m::Constant())))) ||
Match(ops.back(),
m::Convert(
&subgraph.convert,
m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()),
m::Divide(&subgraph.binary, m::Op(),
ScalarBroadcast(&subgraph.scale_bcast)),
ScalarBroadcast(m::Constant())))))) {
subgraph.unaries = {ops.begin(), ops.end() - 3};
} else if (ops.size() > 0 &&
Match(ops.back(), m::Convert(&subgraph.convert))) {
subgraph.unaries = {ops.begin(), ops.end() - 1};
} else {
VLOG(5) << "Did not find type conversion or quantization pattern.";
return std::nullopt;
}
for (HloInstruction* unary : subgraph.unaries) {
if (!Match(unary, m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(),
m::Reshape(), m::Slice()))) {
VLOG(5) << "Unexpected instruction in unary ops.";
return std::nullopt;
}
}
return std::make_optional<ConversionSubgraph>(std::move(subgraph));
}
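// If the collective's operand is dequantized and the scale is replicated,
// re-applies the collective to the narrow pre-convert data and moves the
// convert (and scaling) after it.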
absl::Status MatchDequantization(HloInstruction* instr, bool* changed) {
std::optional<ConversionSubgraph> subgraph =
IsSupportedDequantization(instr->mutable_operand(0));
if (!subgraph.has_value()) {
return absl::OkStatus();
}
if (subgraph->scale_bcast) {
TF_ASSIGN_OR_RETURN(
bool scale_is_replicated,
InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast));
if (!scale_is_replicated) {
return absl::OkStatus();
}
}
HloInstruction* new_coll_operand = subgraph->convert->mutable_operand(0);
new_coll_operand = ApplyUnaries(new_coll_operand, subgraph->unaries);
Shape new_coll_shape = ShapeUtil::ChangeElementType(
instr->shape(), new_coll_operand->shape().element_type());
HloInstruction* new_collective = instr->AddInstruction(
instr->CloneWithNewOperands(new_coll_shape, {new_coll_operand}));
Shape new_convert_shape = ShapeUtil::ChangeElementType(
new_collective->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_convert =
instr->AddInstruction(subgraph->convert->CloneWithNewOperands(
new_convert_shape, {new_collective}));
HloInstruction* new_binary;
if (subgraph->binary) {
HloInstruction* new_scale_bcast = instr->AddInstruction(
subgraph->scale_bcast->CloneWithNewShape(new_convert->shape()));
new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands(
new_convert->shape(), {new_convert, new_scale_bcast}));
}
TF_RETURN_IF_ERROR(
instr->ReplaceAllUsesWith(subgraph->binary ? new_binary : new_convert));
*changed = true;
VLOG(5) << "Quantized collective " << new_collective->ToShortString();
return absl::OkStatus();
}
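// If the collective's single user quantizes its result and the scale is
// replicated, applies the scaling/clamp/convert to the collective's operand
// instead, so the collective runs on the narrow type.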
absl::Status MatchQuantization(HloInstruction* instr, bool* changed) {
std::optional<ConversionSubgraph> subgraph;
if (instr->user_count() == 1) {
subgraph = IsSupportedQuantization(instr->users()[0]);
}
if (!subgraph.has_value()) {
return absl::OkStatus();
}
if (subgraph->scale_bcast) {
TF_ASSIGN_OR_RETURN(
bool scale_is_replicated,
InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast));
if (!scale_is_replicated) {
return absl::OkStatus();
}
}
HloInstruction* coll_operand = instr->mutable_operand(0);
HloInstruction *new_binary, *new_clamp;
if (subgraph->binary) {
HloInstruction* new_scale_bcast = instr->AddInstruction(
subgraph->scale_bcast->CloneWithNewShape(coll_operand->shape()));
new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands(
coll_operand->shape(), {coll_operand, new_scale_bcast}));
HloInstruction* new_clamp_lower = instr->AddInstruction(
subgraph->clamp->operand(0)->CloneWithNewShape(coll_operand->shape()));
HloInstruction* new_clamp_upper = instr->AddInstruction(
subgraph->clamp->operand(2)->CloneWithNewShape(coll_operand->shape()));
new_clamp = instr->AddInstruction(subgraph->clamp->CloneWithNewOperands(
coll_operand->shape(), {new_clamp_lower, new_binary, new_clamp_upper}));
}
Shape new_convert_shape = ShapeUtil::ChangeElementType(
coll_operand->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_convert =
instr->AddInstruction(subgraph->convert->CloneWithNewOperands(
new_convert_shape, {subgraph->binary ? new_clamp : coll_operand}));
Shape new_collective_shape = ShapeUtil::ChangeElementType(
instr->shape(), subgraph->convert->shape().element_type());
HloInstruction* new_collective = instr->AddInstruction(
instr->CloneWithNewOperands(new_collective_shape, {new_convert}));
new_collective = ApplyUnaries(new_collective, subgraph->unaries);
TF_RETURN_IF_ERROR(subgraph->convert->ReplaceAllUsesWith(new_collective));
*changed = true;
VLOG(5) << "Quantized collective " << new_collective->ToShortString();
return absl::OkStatus();
}
}
absl::StatusOr<bool> CollectiveQuantizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (IsSupportedCollective(instr)) {
TF_RETURN_IF_ERROR(MatchDequantization(instr, &changed));
TF_RETURN_IF_ERROR(MatchQuantization(instr, &changed));
}
}
}
return changed;
}
} | #include "xla/service/collective_quantizer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class CollectiveQuantizerTest : public HloTestBase {
public:
absl::StatusOr<bool> RunCollectiveQuantizer(HloModule* module) {
CollectiveQuantizer collective_quantizer;
return collective_quantizer.Run(module, {});
}
};
TEST_F(CollectiveQuantizerTest, AllGatherConvert) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT convert = f8e4m3fn[8,32,8,128] convert(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Convert(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherConvertUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
reshape = bf16[8,32,1024] reshape(all-gather)
slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]}
ROOT convert = f8e4m3fn[8,32,512] convert(slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Slice(op::Reshape(op::AllGather(op::Convert(op::Parameter())))));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllToAllQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
all-to-all = bf16[8,32,8,128] all-to-all(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-to-all, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllToAll(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* all_to_all = module->entry_computation()->root_instruction();
EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, CollectiveBroadcastQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
collective-broadcast = bf16[8,32,8,128] collective-broadcast(param), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(collective-broadcast, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CollectiveBroadcast(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* collective_broadcast =
module->entry_computation()->root_instruction();
EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, CollectivePermuteQuantize) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,128] parameter(0)
collective-permute = bf16[8,32,8,128] collective-permute(param), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(collective-permute, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CollectivePermute(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))));
HloInstruction* collective_permute =
module->entry_computation()->root_instruction();
EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
reshape = bf16[8,32,1024] reshape(all-gather)
slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]}
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,512] broadcast(scale), dimensions={}
divide = bf16[8,32,512] divide(slice, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,512] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,512] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,512] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,512] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Slice(op::Reshape(op::AllGather(op::Convert(op::Clamp(
op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()),
op::Broadcast()))))));
HloInstruction* slice = module->entry_computation()->root_instruction();
EXPECT_THAT(slice->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeMultiUser) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
add = bf16[8,32,8,128] add(divide, clamp)
ROOT convert = f8e4m3fn[8,32,8,128] convert(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveQuantizerTest, AllGatherQuantizeNonReplicatedScale) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,8,128] parameter(0)
all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
scale = bf16[] parameter(1)
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
divide = bf16[8,32,8,128] divide(all-gather, scale_bcast)
clamp_lower = bf16[] constant(-448.0)
clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={}
clamp_upper = bf16[] constant(448.0)
clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={}
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast)
ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveQuantizerTest, ConvertAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
ROOT all-gather = bf16[8,32,8,128] all-gather(convert), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Convert(op::AllGather(op::Parameter())));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, ConvertAllGatherUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
reshape = bf16[8,4,1024] reshape(convert)
slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]}
ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter())))));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
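// Grounded in the test below: a dequantization (convert followed by multiply
// with a replicated scale) is hoisted past the all-gather so the collective
// itself runs on f8 data.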
TEST_F(CollectiveQuantizerTest, DequantizeAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,4,8,128] multiply(convert, scale_bcast)
ROOT all-gather = bf16[8,32,8,128] all-gather(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::AllGather(op::Parameter())),
op::Broadcast()));
const HloInstruction* all_gather =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeAllToAll) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT all-to-all = bf16[8,32,8,128] all-to-all(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::AllToAll(op::Parameter())),
op::Broadcast()));
const HloInstruction* all_to_all =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeCollectiveBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT collective-broadcast = bf16[8,32,8,128] collective-broadcast(multiply), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::CollectiveBroadcast(op::Parameter())),
op::Broadcast()));
const HloInstruction* collective_broadcast =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,32,8,128] parameter(0)
convert = bf16[8,32,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,32,8,128] multiply(convert, scale_bcast)
ROOT collective-permute = bf16[8,32,8,128] collective-permute(multiply), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Convert(op::CollectivePermute(op::Parameter())),
op::Broadcast()));
const HloInstruction* collective_permute =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN);
}
TEST_F(CollectiveQuantizerTest, DequantizeAllGatherUnary) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f8e4m3fn[8,4,8,128] parameter(0)
convert = bf16[8,4,8,128] convert(param)
scale = bf16[] parameter(1), sharding={replicated}
scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={}
multiply = bf16[8,4,8,128] multiply(convert, scale_bcast)
reshape = bf16[8,4,1024] reshape(multiply)
slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]}
ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Multiply(
op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter())))),
op::Broadcast()));
HloInstruction* all_gather = module->entry_computation()
->root_instruction()
->mutable_operand(0)
->mutable_operand(0);
EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a20636e-d0bb-4a0d-8c31-35ca2b50c535 | cpp | tensorflow/tensorflow | while_loop_simplifier | third_party/xla/xla/service/while_loop_simplifier.cc | third_party/xla/xla/service/while_loop_simplifier_test.cc | #include "xla/service/while_loop_simplifier.h"
#include <cstdint>
#include <optional>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/union_find.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace m = match;
using hlo_query::ContainsInstrWithOpcode;
using std::optional;
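// Replaces compares between the loop induction variable and a constant with
// true/false when the known init value and trip count prove the comparison
// has the same result on every iteration. Returns true if the body changed.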
static absl::StatusOr<bool> TryRemoveTrivialCompare(HloInstruction* while_op) {
std::optional<int64_t> indvar_index = GetLoopInductionVarTupleIdx(while_op);
if (indvar_index.has_value()) {
if (while_op->operand(0)->operand(*indvar_index)->IsConstant()) {
const HloConstantInstruction* init_value_hlo =
Cast<HloConstantInstruction>(
while_op->operand(0)->operand(*indvar_index));
std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
while_op, indvar_index.value(), init_value_hlo->literal());
if (trip_count.has_value()) {
std::optional<int64_t> init_value =
LiteralUtil::LiteralAsScalarInt64(init_value_hlo->literal());
for (HloInstruction* body_instr :
while_op->while_body()->instructions()) {
HloInstruction* constant;
if (Match(body_instr,
m::Compare(m::GetTupleElement(m::Parameter(),
indvar_index.value()),
m::Constant(&constant).IsConstantScalar()))) {
std::optional<int64_t> constant_value =
LiteralUtil::LiteralAsScalarInt64(constant->literal());
if (constant_value.has_value()) {
if (constant_value.value() <= init_value.value()) {
if (body_instr->comparison_direction() ==
ComparisonDirection::kLt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, false)));
return true;
} else if (body_instr->comparison_direction() ==
ComparisonDirection::kGt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, true)));
return true;
}
}
if (constant_value.value() >=
init_value.value() + trip_count.value()) {
if (body_instr->comparison_direction() ==
ComparisonDirection::kLt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, true)));
return true;
} else if (body_instr->comparison_direction() ==
ComparisonDirection::kGt) {
TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction(
body_instr, MakeScalarLike(body_instr, false)));
return true;
}
}
}
}
}
}
}
}
return false;
}
void CopyFrontendAttributes(HloInstruction* old_while_op,
HloInstruction* new_while_op) {
new_while_op->add_frontend_attributes(old_while_op->frontend_attributes());
}
void CopyMetadata(HloInstruction* old_while_op, HloInstruction* new_while_op) {
new_while_op->set_metadata(old_while_op->metadata());
}
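// Rebuilds `while_op` so that its carried tuple contains only
// `used_tuple_indices`. Outside the loop, removed elements are replaced by
// GTEs of the original init, or by the new loop's element at
// `index_for_replaced` when it is non-negative. Returns the new while op.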
static absl::StatusOr<HloInstruction*> RemoveDeadTupleIndices(
HloInstruction* while_op, absl::flat_hash_set<int64_t>& used_tuple_indices,
int64_t index_for_replaced = -1) {
std::vector<int64_t> new_to_old_tuple_idx(used_tuple_indices.begin(),
used_tuple_indices.end());
absl::c_sort(new_to_old_tuple_idx);
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
absl::flat_hash_map<int64_t, int64_t> old_to_new_tuple_idx;
for (int64_t new_idx = 0; new_idx < new_to_old_tuple_idx.size(); ++new_idx) {
int64_t old_idx = new_to_old_tuple_idx[new_idx];
old_to_new_tuple_idx[old_idx] = new_idx;
VLOG(2) << "Remapping tuple index " << old_idx << " to " << new_idx;
}
std::vector<const Shape*> new_while_tuple_elem_shapes;
new_while_tuple_elem_shapes.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_tuple_elem_shapes.push_back(
&while_init->shape().tuple_shapes(old_idx));
}
Shape new_while_shape =
ShapeUtil::MakeTupleShapeWithPtrs(new_while_tuple_elem_shapes);
auto make_while_computation_replacements = [&](const HloComputation* comp) {
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
auto* param = comp->parameter_instruction(0);
replacements.emplace(param, HloInstruction::CreateParameter(
0, new_while_shape, param->name()));
std::vector<HloInstruction*> materialized_users(param->users().begin(),
param->users().end());
for (const auto* user : materialized_users) {
if (user == while_body_root) {
continue;
}
CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement)
<< user->ToString(print_no_metadata);
int64_t old_idx = user->tuple_index();
auto new_idx_iter = old_to_new_tuple_idx.find(old_idx);
if (new_idx_iter != old_to_new_tuple_idx.end()) {
replacements.emplace(
user, HloInstruction::CreateGetTupleElement(user->shape(), param,
new_idx_iter->second));
} else {
replacements.emplace(user, nullptr);
}
}
for (const auto* hlo : comp->MakeInstructionPostOrder()) {
if (hlo == comp->root_instruction() || replacements.contains(hlo)) {
continue;
}
for (const auto* operand : hlo->operands()) {
auto op_it = replacements.find(operand);
if (op_it != replacements.end() && op_it->second == nullptr) {
replacements[hlo] = nullptr;
break;
}
}
}
return replacements;
};
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
while_cond_replacements = make_while_computation_replacements(while_cond);
std::unique_ptr<HloComputation> new_while_cond =
while_cond->CloneWithReplacements(&while_cond_replacements);
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
while_body_replacements = make_while_computation_replacements(while_body);
std::vector<HloInstruction*> new_while_body_root_elems;
new_while_body_root_elems.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_body_root_elems.push_back(
while_body_root->mutable_operand(old_idx));
}
while_body_replacements.emplace(
while_body_root, HloInstruction::CreateTuple(new_while_body_root_elems));
std::unique_ptr<HloComputation> new_while_body =
while_body->CloneWithReplacements(&while_body_replacements);
std::vector<HloInstruction*> new_while_init_elems;
new_while_init_elems.reserve(new_to_old_tuple_idx.size());
for (int64_t old_idx : new_to_old_tuple_idx) {
new_while_init_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
}
auto* new_while_init = computation->AddInstruction(
HloInstruction::CreateTuple(new_while_init_elems));
auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
new_while_shape,
module->AddEmbeddedComputation(std::move(new_while_cond)),
module->AddEmbeddedComputation(std::move(new_while_body)),
new_while_init));
new_while_op->CopyBackendConfigFrom(while_op);
CopyFrontendAttributes(while_op, new_while_op);
CopyMetadata(while_op, new_while_op);
std::vector<HloInstruction*> new_tuple_elems;
const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
for (int64_t old_idx = 0; old_idx < tuple_size; ++old_idx) {
auto new_tuple_idx_it = old_to_new_tuple_idx.find(old_idx);
if (new_tuple_idx_it != old_to_new_tuple_idx.end() ||
index_for_replaced != -1) {
int64_t gte_idx = new_tuple_idx_it != old_to_new_tuple_idx.end()
? new_tuple_idx_it->second
: index_for_replaced;
new_tuple_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_while_op->shape().tuple_shapes(gte_idx), new_while_op,
gte_idx)));
} else {
new_tuple_elems.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
}
}
HloInstruction* new_tuple =
computation->AddInstruction(HloInstruction::CreateTuple(new_tuple_elems));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, new_tuple));
return new_while_op;
}
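// Removes carried tuple elements that cannot influence the loop condition,
// any side-effecting instruction, another live element, or a user of the
// loop's result. Dependencies are propagated through body and condition, and
// a union-find groups connected elements so whole dead groups are dropped
// together. Bails out if the tuple parameter has any non-GTE user.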
absl::StatusOr<bool> TryRemoveDeadWhileParams(HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
if (!while_op->parent()->IsSafelyRemovable(while_op)) {
VLOG(2) << "Can't remove dead parameters from non-removable while op.";
return false;
}
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
if (!while_init->shape().IsTuple()) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body's root is not a tuple(...) instruction.";
return false;
}
const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
absl::flat_hash_set<int64_t> used_tuple_indices;
for (int64_t i = 0; i < tuple_size; ++i) {
used_tuple_indices.insert(i);
}
for (const HloInstruction* instr : {while_body->parameter_instruction(0),
while_cond->parameter_instruction(0)}) {
for (const HloInstruction* user : instr->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Cowardly refusing to analyze while loop with "
<< instr->ToString(print_no_metadata)
<< " used by non-GTE instruction "
<< user->ToString(print_no_metadata) << " in computation "
<< instr->parent()->name();
return false;
}
}
}
if (tuple_size == 0) {
VLOG(2) << "Can't remove elements from while loop's tuple -- it's already "
"empty.";
return false;
}
absl::flat_hash_set<int64_t> used_indices_after_loop;
if (while_op == while_op->parent()->root_instruction()) {
for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
used_indices_after_loop.insert(i);
}
}
for (auto user : while_op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
used_indices_after_loop.insert(i);
}
break;
}
used_indices_after_loop.insert(user->tuple_index());
}
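// Set of input tuple indices an instruction transitively depends on. Merges
// are lazy: the set keeps pointers to its constituents and only materializes
// an owned flat_hash_set once the inline buffer overflows.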
struct InputIndicesSet {
void Merge(const InputIndicesSet& other) {
if (all.size() + other.all.size() <= all.capacity() && owned == nullptr) {
absl::c_copy(other.all, std::back_inserter(all));
return;
}
if (owned == nullptr) {
owned = std::make_unique<absl::flat_hash_set<int64_t>>();
owned->reserve(other.all.front()->size() * 2);
}
for (auto* deps : all) {
if (deps == owned.get()) {
continue;
}
owned->insert(deps->begin(), deps->end());
}
for (auto* deps : other.all) {
owned->insert(deps->begin(), deps->end());
}
all.clear();
all.push_back(owned.get());
}
void Add(int64_t index) {
if (owned == nullptr) {
CHECK(all.empty());
owned = std::make_unique<absl::flat_hash_set<int64_t>>();
all.push_back(owned.get());
}
owned->insert(index);
}
std::unique_ptr<absl::flat_hash_set<int64_t>> owned;
absl::InlinedVector<const absl::flat_hash_set<int64_t>*, 4> all;
};
absl::flat_hash_map<HloInstruction*, InputIndicesSet> inst_input_deps;
absl::flat_hash_map<HloInstruction*, UnionFind<HloInstruction*>>
disjoint_sets;
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* inst : comp->instructions()) {
if (inst == while_input || inst == while_body_root) {
continue;
}
disjoint_sets[inst].Get() = inst;
}
}
absl::flat_hash_set<int64_t> side_effecting_indices;
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* inst : comp->MakeInstructionPostOrder()) {
if (inst == while_input || inst == while_body_root) {
continue;
}
auto& deps = inst_input_deps[inst];
auto& my_set = disjoint_sets[inst];
if (inst->opcode() == HloOpcode::kGetTupleElement &&
inst->operand(0) == while_input) {
deps.Add(inst->tuple_index());
HloInstruction* output =
while_body_root->mutable_operand(inst->tuple_index());
if (output != inst) {
disjoint_sets[output].Merge(&my_set);
}
} else {
for (HloInstruction* operand : inst->operands()) {
disjoint_sets[operand].Merge(&my_set);
deps.Merge(inst_input_deps[operand]);
}
}
if (inst->HasSideEffect() || inst == while_cond->root_instruction()) {
for (auto* dep : deps.all) {
side_effecting_indices.insert(dep->begin(), dep->end());
}
}
}
}
absl::flat_hash_set<int64_t> indices_affecting_others;
for (int64_t i = 0; i < tuple_size; ++i) {
HloInstruction* output = while_body_root->mutable_operand(i);
for (auto* deps : inst_input_deps[output].all) {
for (int64_t index : *deps) {
if (index != i) {
indices_affecting_others.insert(index);
}
}
}
}
for (int64_t i = 0; i < tuple_size; ++i) {
if (!indices_affecting_others.contains(i) &&
!used_indices_after_loop.contains(i) &&
!side_effecting_indices.contains(i)) {
VLOG(2) << "Remove with dependencies " << i;
used_tuple_indices.erase(i);
}
}
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<int64_t>> groups;
for (int64_t i = 0; i < tuple_size; ++i) {
HloInstruction* output = while_body_root->mutable_operand(i);
groups[disjoint_sets[output].Get()].insert(i);
}
for (HloComputation* comp : {while_body, while_cond}) {
HloInstruction* while_input = comp->parameter_instruction(0);
for (HloInstruction* gte : while_input->users()) {
groups[disjoint_sets[gte].Get()].insert(gte->tuple_index());
}
}
for (const auto& group : groups) {
if (absl::c_any_of(group.second, [&](int64_t index) {
const HloInstruction* output = while_body_root->operand(index);
return side_effecting_indices.contains(index) ||
(used_indices_after_loop.contains(index) &&
!(output->opcode() == HloOpcode::kGetTupleElement &&
output->operand(0) ==
while_body->parameter_instruction(0) &&
output->tuple_index() == index));
})) {
continue;
}
VLOG(2) << "Remove with groups:";
for (int64_t index : group.second) {
VLOG(2) << " index " << index;
used_tuple_indices.erase(index);
}
}
if (used_tuple_indices.size() == tuple_size) {
VLOG(2) << "Loop " << while_op->ToString(print_no_metadata)
<< " uses all of its inputs; no simplification possible.";
return false;
}
CHECK_LT(used_tuple_indices.size(), tuple_size);
VLOG(1) << "Eliminating " << tuple_size - used_tuple_indices.size()
<< " elements from tuple of "
<< while_op->ToString(print_no_metadata);
TF_ASSIGN_OR_RETURN(while_op,
RemoveDeadTupleIndices(while_op, used_tuple_indices));
return true;
}
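// Redirects every GTE of a duplicated tuple index in both body and condition
// to read `tuple_index` instead, then removes the duplicates via
// RemoveDeadTupleIndices.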
static absl::StatusOr<HloInstruction*> TryRemoveRepeatedWhileTupleIndicesHelper(
HloInstruction* while_op, const int64_t tuple_index, bool replace_with_init,
absl::flat_hash_set<int64_t>& duplicates) {
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_init = while_op->mutable_operand(0);
VLOG(2) << "while_init " << while_init->ToString() << " operands "
<< while_init->operand_count();
VLOG(2) << "while_body_root " << while_body->root_instruction()->ToString()
<< " operands " << while_body->root_instruction()->operand_count();
for (HloComputation* comp : {while_body, while_cond}) {
auto new_get = comp->AddInstruction(HloInstruction::CreateGetTupleElement(
comp->parameter_instruction(0)->shape().tuple_shapes(tuple_index),
comp->parameter_instruction(0), tuple_index));
std::vector<HloInstruction*> instrs_to_replace;
for (auto* instr : comp->instructions()) {
if (instr->opcode() == HloOpcode::kGetTupleElement &&
duplicates.contains(instr->tuple_index()) &&
instr->operand(0) == comp->parameter_instruction(0)) {
instrs_to_replace.push_back(instr);
}
}
for (auto instr : instrs_to_replace) {
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_get));
}
}
absl::flat_hash_set<int64_t> used_tuple_indices;
for (int index = 0; index < while_init->shape().tuple_shapes_size();
++index) {
if (!duplicates.count(index)) {
used_tuple_indices.insert(index);
}
}
TF_ASSIGN_OR_RETURN(
while_op, RemoveDeadTupleIndices(while_op, used_tuple_indices,
replace_with_init ? -1 : tuple_index));
return while_op;
}
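// True if `instr` is a dynamic-update-slice writing into a value read
// directly out of the while body's parameter tuple.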
static bool IsDynamicUpdateSliceWhileInsertion(
const HloInstruction* instr, const HloComputation* while_body) {
return instr->opcode() == HloOpcode::kDynamicUpdateSlice &&
instr->operand(0)->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->operand(0) == while_body->parameter_instruction(0);
}
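// Deduplicates carried tuple elements that stay identical across iterations:
// plain pass-throughs (output i is GTE i of the parameter) or
// dynamic-update-slice insertions with identical update and index operands,
// provided the duplicates also share the same init element.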
static absl::StatusOr<bool> TryRemoveRepeatedWhileTupleIndices(
HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
int index_to_investigate = 0;
if (!while_op->parent()->IsSafelyRemovable(while_op)) {
VLOG(2) << "Can't remove dead parameters from non-removable while op.";
return false;
}
HloInstruction* while_init = while_op->mutable_operand(0);
HloComputation* while_cond = while_op->while_condition();
HloComputation* while_body = while_op->while_body();
HloInstruction* while_body_root = while_body->root_instruction();
if (!while_init->shape().IsTuple()) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
bool changed = false;
while (index_to_investigate < while_init->shape().tuple_shapes_size()) {
if (!while_init->shape().IsTuple() ||
while_init->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While op's carried value isn't tuple shaped.";
return false;
}
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body's root is not a tuple(...) instruction.";
return false;
}
auto& while_shape = while_init->shape();
VLOG(2) << "Iterating " << index_to_investigate;
absl::flat_hash_set<int64_t> duplicates;
auto* pivot_init_elem = while_init->operand(index_to_investigate);
auto* pivot_body_elem = while_body_root->operand(index_to_investigate);
bool replace_with_init = true;
if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
pivot_body_elem->operand(0) == while_body->parameter_instruction(0)) {
if (pivot_body_elem->tuple_index() != index_to_investigate) {
VLOG(2) << "Mismatch between pivot_body_elem->tuple_index() "
<< pivot_body_elem->tuple_index() << " index_to_investigate "
<< index_to_investigate;
index_to_investigate++;
continue;
}
} else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
while_body)) {
if (pivot_body_elem->operand(0)->tuple_index() != index_to_investigate) {
VLOG(2)
<< "Mismatch between pivot_body_elem->operand(0)->tuple_index() "
<< pivot_body_elem->operand(0)->tuple_index()
<< " index_to_investigate " << index_to_investigate;
index_to_investigate++;
continue;
}
} else {
index_to_investigate++;
continue;
}
for (int64_t i = index_to_investigate + 1;
i < while_shape.tuple_shapes_size(); ++i) {
auto* init_elem = while_init->operand(i);
auto* body_elem = while_body_root->operand(i);
if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
body_elem->opcode() == HloOpcode::kGetTupleElement &&
body_elem->operand(0) == while_body->parameter_instruction(0)) {
if (body_elem->tuple_index() != i) {
VLOG(2) << "Mismatch between body_elem->tuple_index() "
<< body_elem->tuple_index() << " i " << i;
continue;
}
} else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
while_body) &&
IsDynamicUpdateSliceWhileInsertion(body_elem, while_body)) {
if (pivot_body_elem->operand_count() != body_elem->operand_count()) {
VLOG(2) << "Mismatch in operand count of dynamic-update-slice "
<< pivot_body_elem->operand_count() << " vs "
<< body_elem->operand_count();
continue;
}
if (body_elem->operand(0)->tuple_index() != i) {
VLOG(2) << "Mismatch between body_elem->operand(0)->tuple_index() "
<< body_elem->tuple_index() << " i " << i;
continue;
}
if (pivot_body_elem->operand(0) == body_elem->operand(0)) {
VLOG(2) << "Inserting in the same input index";
continue;
}
bool mismatch = false;
for (int64_t i = 1; i < body_elem->operand_count(); ++i) {
if (body_elem->operand(i) != pivot_body_elem->operand(i)) {
VLOG(2) << "Mismatch in insertion indices or values";
mismatch = true;
break;
}
}
if (mismatch) {
continue;
}
replace_with_init = false;
} else {
continue;
}
if (pivot_init_elem == init_elem) {
VLOG(2) << "init_elem " << init_elem->ToString() << " pivot_init_elem "
<< pivot_init_elem->ToString();
VLOG(2) << "body_elem " << body_elem->ToString() << " pivot_body_elem "
<< pivot_body_elem->ToString();
duplicates.insert(i);
}
}
if (!duplicates.empty()) {
VLOG(2) << "Duplicate found " << duplicates.size() << " pivot_init "
<< pivot_init_elem->ToString();
TF_ASSIGN_OR_RETURN(while_op, TryRemoveRepeatedWhileTupleIndicesHelper(
while_op, index_to_investigate,
replace_with_init, duplicates));
changed = true;
VLOG(2) << "Changed while_op " << while_op->ToString()
<< " while_op operand count " << while_op->operand_count();
while_init = while_op->mutable_operand(0);
while_cond = while_op->while_condition();
while_body = while_op->while_body();
while_body_root = while_body->root_instruction();
}
index_to_investigate++;
}
return changed;
}
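// Removes carried elements whose init and body-root values are the same
// constant, sinking the constant directly into the condition and body.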
static absl::StatusOr<bool> TryRemoveConstantParams(HloInstruction* while_op) {
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
auto* while_init = while_op->mutable_operand(0);
auto* while_body = while_op->while_body();
auto* while_cond = while_op->while_condition();
auto* while_body_root = while_body->root_instruction();
if (while_init->opcode() != HloOpcode::kTuple ||
while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
TF_RET_CHECK(while_cond->num_parameters() == 1);
TF_RET_CHECK(while_body->num_parameters() == 1);
TF_RET_CHECK(
ShapeUtil::Compatible(while_init->shape(), while_body_root->shape()));
absl::flat_hash_set<int64_t> constant_tuple_indices;
const auto& while_shape = while_init->shape();
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
auto* init_elem = while_init->operand(i);
auto* body_elem = while_body_root->operand(i);
if (init_elem->opcode() == HloOpcode::kConstant &&
body_elem->opcode() == HloOpcode::kConstant &&
init_elem->literal() == body_elem->literal()) {
constant_tuple_indices.insert(i);
}
}
if (constant_tuple_indices.empty()) {
return false;
}
std::vector<const Shape*> new_while_shape_elems;
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
if (!constant_tuple_indices.count(i)) {
new_while_shape_elems.push_back(&while_shape.tuple_shapes(i));
}
}
Shape new_while_shape =
ShapeUtil::MakeTupleShapeWithPtrs(new_while_shape_elems);
std::vector<std::unique_ptr<HloInstruction>> new_instrs;
auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) {
new_instrs.push_back(std::move(instr));
return new_instrs.back().get();
};
auto remove_constant_elems = [&](HloInstruction* instr) {
CHECK(ShapeUtil::Compatible(instr->shape(), while_shape));
std::vector<HloInstruction*> tuple_elems;
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
if (!constant_tuple_indices.count(i)) {
tuple_elems.push_back(
add_new_instr(HloInstruction::CreateGetTupleElement(
while_shape.tuple_shapes(i), instr, i)));
}
}
return HloInstruction::CreateTuple(tuple_elems);
};
auto add_constant_elems = [&](HloInstruction* instr) {
CHECK(ShapeUtil::Compatible(instr->shape(), new_while_shape));
std::vector<HloInstruction*> tuple_elems;
int64_t j = 0;
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
if (constant_tuple_indices.count(i)) {
tuple_elems.push_back(while_init->mutable_operand(i));
} else {
tuple_elems.push_back(
add_new_instr(HloInstruction::CreateGetTupleElement(
while_shape.tuple_shapes(i), instr, j)));
++j;
}
}
return HloInstruction::CreateTuple(tuple_elems);
};
if (ShapeUtil::IsEmptyTuple(new_while_shape)) {
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, while_init));
return true;
}
std::unique_ptr<HloComputation> new_while_cond =
while_cond->CloneWithReplacementPairs({
while_cond->parameter_instruction(0),
add_constant_elems(add_new_instr(HloInstruction::CreateParameter(
0, new_while_shape,
while_cond->parameter_instruction(0)->name()))),
});
std::unique_ptr<HloComputation> new_while_body =
while_body->CloneWithReplacementPairs(
{
while_body->parameter_instruction(0),
add_constant_elems(add_new_instr(HloInstruction::CreateParameter(
0, new_while_shape,
while_cond->parameter_instruction(0)->name()))),
},
{
while_body->root_instruction(),
remove_constant_elems(
add_new_instr(while_body->root_instruction()->Clone())),
});
new_instrs.clear();
auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
new_while_shape,
module->AddEmbeddedComputation(std::move(new_while_cond)),
module->AddEmbeddedComputation(std::move(new_while_body)),
add_new_instr(remove_constant_elems(while_init))));
new_while_op->CopyBackendConfigFrom(while_op);
CopyFrontendAttributes(while_op, new_while_op);
CopyMetadata(while_op, new_while_op);
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
while_op, add_constant_elems(new_while_op)));
for (auto& instr : new_instrs) {
computation->AddInstruction(std::move(instr));
}
return true;
}
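// Deletes the loop when its trip count is provably 0 (replaced by the init
// tuple) or 1 (body inlined as a call), unless side effects or the
// "skip-simplify-while-loops_trip-count-one" frontend attribute forbid it.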
static absl::StatusOr<bool> TryRemoveWhileLoop(HloInstruction* while_op) {
if (!while_op->parent()->IsSafelyRemovable(while_op)) {
VLOG(2) << "Not attempting to remove while loop that is not removable: "
<< while_op->ToShortString();
return false;
}
if (while_op->while_condition()->HasSideEffect()) {
VLOG(2) << "Not attempting to remove while loop whose condition contains "
"side-effecting instructions: "
<< while_op->ToShortString();
return false;
}
optional<int64_t> trip_count =
ComputeWhileLoopTripCount(while_op, /*max_brute_force_iters=*/1);
if (trip_count && *trip_count == 0) {
auto computation = while_op->parent();
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
while_op, while_op->mutable_operand(0)));
return true;
}
const auto& attrs = while_op->frontend_attributes().map();
bool skip_trip_count_one_simplification =
attrs.contains("skip-simplify-while-loops_trip-count-one") &&
(attrs.at("skip-simplify-while-loops_trip-count-one") == "true");
if (trip_count && *trip_count == 1 && !skip_trip_count_one_simplification) {
bool has_side_effects = absl::c_any_of(
while_op->called_computations(), [](const HloComputation* computation) {
return computation->HasSideEffect();
});
if (!has_side_effects) {
auto computation = while_op->parent();
auto call_op = computation->AddInstruction(HloInstruction::CreateCall(
while_op->shape(), while_op->operands(), while_op->while_body()));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, call_op));
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_op));
(void)inlined_instructions_map;
return true;
} else {
VLOG(2) << "Not attempting to simplify while loop because it contains a "
"side-effecting node: "
<< while_op->ToShortString();
}
}
return false;
}
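// For scalar carried elements whose init is a constant and whose body output
// is the untouched GTE of the same index, replaces uses of that GTE in the
// condition and body with a clone of the constant.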
static absl::StatusOr<bool> TryPropagateConstant(HloInstruction* while_op) {
auto while_init = while_op->operand(0);
if (while_init->opcode() != HloOpcode::kTuple) {
return false;
}
auto while_body = while_op->while_body();
auto while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
auto while_body_param = while_body->parameter_instruction(0);
const HloInstruction::InstructionVector& root_operands =
while_body_root->operands();
absl::flat_hash_map<int, const HloInstruction*> index_to_constant;
for (int i = 0; i < root_operands.size(); i++) {
const HloInstruction* init_tuple_elem = nullptr;
if (Match(root_operands[i],
m::GetTupleElement(m::Op().Is(while_body_param), i)
.WithShape(m::Shape().IsScalar())) &&
Match(while_init->operand(i), m::Constant(&init_tuple_elem))) {
VLOG(3) << "Found loop invariant tuple element " << i << " "
<< init_tuple_elem->ToString();
index_to_constant[i] = init_tuple_elem;
}
}
if (index_to_constant.empty()) {
return false;
}
auto propagate_constant =
[&](HloComputation* computation) -> absl::StatusOr<bool> {
HloInstruction* param = computation->parameter_instruction(0);
bool changed = false;
for (auto instr : param->users()) {
if (instr->opcode() == HloOpcode::kGetTupleElement) {
VLOG(3) << "tuple index " << instr->tuple_index() << " "
<< instr->ToString();
auto iter = index_to_constant.find(instr->tuple_index());
if (iter != index_to_constant.end()) {
const HloInstruction* hlo_constant = (*iter).second;
VLOG(3) << "Replace use of " << instr->ToString() << " with "
<< hlo_constant->ToString();
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(
computation->AddInstruction(hlo_constant->Clone())));
changed = true;
}
}
}
return changed;
};
TF_ASSIGN_OR_RETURN(bool changed_cond,
propagate_constant(while_op->while_condition()));
TF_ASSIGN_OR_RETURN(bool changed_body, propagate_constant(while_body));
return changed_cond || changed_body;
}
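// Rebuilds a (possibly nested) tuple of shape `desired_shape` from a flat
// span of leaf instructions, consuming `instrs` from the front.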
static std::unique_ptr<HloInstruction> UnflattenTupleInstr(
absl::Span<HloInstruction*> instrs, const Shape& desired_shape,
std::vector<std::unique_ptr<HloInstruction>>* new_instrs) {
CHECK(desired_shape.IsTuple()) << ShapeUtil::HumanString(desired_shape);
std::vector<HloInstruction*> elems;
for (int i = 0; i < desired_shape.tuple_shapes_size(); ++i) {
const Shape& subshape = desired_shape.tuple_shapes(i);
if (!subshape.IsTuple()) {
elems.push_back(instrs[0]);
instrs.remove_prefix(1);
continue;
}
int64_t num_leaves = 0;
ShapeUtil::ForEachSubshape(
subshape, [&](const Shape& s, const ShapeIndex& /*index*/) {
if (!s.IsTuple()) {
++num_leaves;
}
});
std::unique_ptr<HloInstruction> subinstr =
UnflattenTupleInstr(instrs.subspan(0, num_leaves),
desired_shape.tuple_shapes(i), new_instrs);
elems.push_back(subinstr.get());
new_instrs->push_back(std::move(subinstr));
instrs.remove_prefix(num_leaves);
}
return HloInstruction::CreateTuple(elems);
}
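// Emits get-tuple-elements that flatten `instr` into its leaf elements in
// depth-first order.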
static std::vector<HloInstruction*> GetFlatTupleElems(
HloInstruction* instr,
std::vector<std::unique_ptr<HloInstruction>>* new_instrs) {
const auto& shape = instr->shape();
if (!shape.IsTuple()) {
return {instr};
}
std::vector<HloInstruction*> elems;
for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
new_instrs->push_back(
HloInstruction::CreateGetTupleElement(subshape, instr, i));
auto* gte = new_instrs->back().get();
auto flattened_subshape = GetFlatTupleElems(gte, new_instrs);
elems.insert(elems.end(), flattened_subshape.begin(),
flattened_subshape.end());
}
return elems;
}
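// Rewrites a loop whose carried value is a nested tuple as an equivalent
// loop over the flattened tuple, reconstructing the nested form where the
// original shape is needed.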
static absl::StatusOr<bool> TryFlattenNestedTuples(HloInstruction* while_op) {
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
auto* while_init = while_op->mutable_operand(0);
auto* while_body = while_op->while_body();
auto* while_cond = while_op->while_condition();
auto* while_body_root = while_body->root_instruction();
if (while_init->opcode() != HloOpcode::kTuple ||
while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
TF_RET_CHECK(while_cond->num_parameters() == 1);
TF_RET_CHECK(while_body->num_parameters() == 1);
TF_RET_CHECK(
ShapeUtil::Compatible(while_init->shape(), while_body_root->shape()));
Shape while_shape = while_init->shape();
if (!ShapeUtil::IsNestedTuple(while_shape)) {
return false;
}
std::vector<const Shape*> flattened_shape_elems;
ShapeUtil::ForEachSubshape(while_shape,
[&](const Shape& s, const ShapeIndex& /*index*/) {
if (!s.IsTuple()) {
flattened_shape_elems.push_back(&s);
}
});
Shape flattened_shape =
ShapeUtil::MakeTupleShapeWithPtrs(flattened_shape_elems);
std::vector<std::unique_ptr<HloInstruction>> new_instrs;
auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) {
new_instrs.push_back(std::move(instr));
return new_instrs.back().get();
};
auto nested = [&](HloInstruction* instr) {
std::vector<HloInstruction*> gtes;
const Shape& flat_shape = instr->shape();
gtes.reserve(flat_shape.tuple_shapes_size());
for (int i = 0; i < flat_shape.tuple_shapes_size(); ++i) {
gtes.push_back(add_new_instr(HloInstruction::CreateGetTupleElement(
flat_shape.tuple_shapes(i), instr, i)));
}
auto nested_instr =
UnflattenTupleInstr(absl::MakeSpan(gtes), while_shape, &new_instrs);
CHECK(ShapeUtil::Compatible(nested_instr->shape(), while_shape))
<< ShapeUtil::HumanString(nested_instr->shape()) << " vs "
<< ShapeUtil::HumanString(while_shape);
return nested_instr;
};
auto flattened = [&](HloInstruction* instr) {
return HloInstruction::CreateTuple(GetFlatTupleElems(instr, &new_instrs));
};
std::unique_ptr<HloComputation> new_while_cond =
while_cond->CloneWithReplacementPairs({
while_cond->parameter_instruction(0),
nested(add_new_instr(HloInstruction::CreateParameter(
0, flattened_shape,
while_cond->parameter_instruction(0)->name()))),
});
std::unique_ptr<HloComputation> new_while_body =
while_body->CloneWithReplacementPairs(
{
while_body->parameter_instruction(0),
nested(add_new_instr(HloInstruction::CreateParameter(
0, flattened_shape,
while_body->parameter_instruction(0)->name()))),
},
{
while_body->root_instruction(),
flattened(add_new_instr(while_body->root_instruction()->Clone())),
});
new_instrs.clear();
auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
flattened_shape,
module->AddEmbeddedComputation(std::move(new_while_cond)),
module->AddEmbeddedComputation(std::move(new_while_body)),
computation->AddInstruction(flattened(while_init))));
new_while_op->CopyBackendConfigFrom(while_op);
CopyFrontendAttributes(while_op, new_while_op);
CopyMetadata(while_op, new_while_op);
TF_RETURN_IF_ERROR(
computation->ReplaceWithNewInstruction(while_op, nested(new_while_op)));
for (auto& instr : new_instrs) {
computation->AddInstruction(std::move(instr));
}
return true;
}
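// Merges independent induction variables of integral type `elem_ty`: each
// "i = i + c" update is rewritten as "i_init + c * trip_counter", reusing an
// existing zero-initialized unit-step counter or appending a new one.
// Returns the new while op, or nullptr if fewer than two candidates exist.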
static absl::StatusOr<HloInstruction*> TryMergeInductionVariables(
HloInstruction* while_op, PrimitiveType elem_ty) {
CHECK(primitive_util::IsIntegralType(elem_ty)) << PrimitiveType_Name(elem_ty);
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
auto* while_init = while_op->mutable_operand(0);
auto* while_body = while_op->while_body();
auto* while_cond = while_op->while_condition();
auto* while_body_root = while_body->root_instruction();
if (while_init->opcode() != HloOpcode::kTuple ||
while_body_root->opcode() != HloOpcode::kTuple) {
return nullptr;
}
TF_RET_CHECK(while_cond->num_parameters() == 1);
TF_RET_CHECK(while_body->num_parameters() == 1);
TF_RET_CHECK(
ShapeUtil::Compatible(while_init->shape(), while_body_root->shape()));
Shape while_shape = while_init->shape();
std::optional<int64_t> trip_counter;
absl::flat_hash_map<int64_t, const HloConstantInstruction*> induction_vars;
for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
HloInstruction* constant;
if (!Match(while_body_root->mutable_operand(i),
m::AddAnyOrder(m::GetTupleElement(m::Parameter(), i),
m::ConstantScalar(&constant))
.WithShape(m::Shape().WithElementType(elem_ty)))) {
continue;
}
if (!trip_counter && constant->literal().IsAll(1) &&
while_init->operand(i)->IsConstant() &&
while_init->operand(i)->literal().IsAll(0)) {
VLOG(10) << "Found existing trip counter at index " << i;
trip_counter = i;
} else {
VLOG(10) << "Found induction variable at index " << i;
induction_vars.emplace(i, Cast<HloConstantInstruction>(constant));
}
}
if (induction_vars.size() + (trip_counter.has_value() ? 1 : 0) < 2) {
return nullptr;
}
std::vector<std::unique_ptr<HloInstruction>> new_instrs;
auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) {
new_instrs.push_back(std::move(instr));
return new_instrs.back().get();
};
auto add_binary_op = [&](const Shape& shape, HloOpcode opcode,
HloInstruction* lhs, HloInstruction* rhs) {
if (!ShapeUtil::Compatible(shape, lhs->shape())) {
lhs = add_new_instr(HloInstruction::CreateReshape(shape, lhs));
}
if (!ShapeUtil::Compatible(shape, rhs->shape())) {
rhs = add_new_instr(HloInstruction::CreateReshape(shape, rhs));
}
return add_new_instr(HloInstruction::CreateBinary(shape, opcode, lhs, rhs));
};
auto add_gte = [&](HloInstruction* src, int64_t idx) {
return add_new_instr(HloInstruction::CreateGetTupleElement(
src->shape().tuple_shapes(idx), src, idx));
};
Shape new_while_shape = while_shape;
bool added_trip_counter = false;
if (!trip_counter) {
VLOG(10) << "Adding new trip counter to end of loop's tuple.";
trip_counter = new_while_shape.tuple_shapes_size();
*new_while_shape.add_tuple_shapes() =
ShapeUtil::MakeShape(elem_ty, {});
added_trip_counter = true;
}
auto convert_to_old_form = [&](HloInstruction* instr) {
CHECK(ShapeUtil::Compatible(instr->shape(), new_while_shape));
std::vector<HloInstruction*> tuple_elems;
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
const auto& elem_shape = while_shape.tuple_shapes(i);
if (!induction_vars.count(i)) {
tuple_elems.push_back(add_gte(instr, i));
continue;
}
tuple_elems.push_back(add_binary_op(
elem_shape, HloOpcode::kAdd, add_gte(instr, i),
add_binary_op(elem_shape, HloOpcode::kMultiply,
add_gte(instr, *trip_counter),
add_new_instr(induction_vars.at(i)->Clone()))));
}
return HloInstruction::CreateTuple(tuple_elems);
};
auto convert_to_new_form = [&](HloInstruction* old_root,
HloParameterInstruction* loop_body_param) {
CHECK(ShapeUtil::Compatible(old_root->shape(), while_shape));
std::vector<HloInstruction*> tuple_elems;
tuple_elems.reserve(while_shape.tuple_shapes_size());
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
tuple_elems.push_back(
add_gte((induction_vars.count(i) ? loop_body_param : old_root), i));
}
if (added_trip_counter) {
tuple_elems.push_back(add_binary_op(
new_while_shape.tuple_shapes(*trip_counter), HloOpcode::kAdd,
add_gte(loop_body_param, *trip_counter),
add_new_instr(
HloInstruction::CreateConstant(LiteralUtil::One(elem_ty)))));
}
return HloInstruction::CreateTuple(tuple_elems);
};
auto get_new_while_init = [&](HloInstruction* init) {
CHECK(ShapeUtil::Compatible(init->shape(), while_shape));
if (!added_trip_counter) {
return init;
}
std::vector<HloInstruction*> tuple_elems;
tuple_elems.reserve(while_shape.tuple_shapes_size());
for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {
tuple_elems.push_back(add_gte(init, i));
}
tuple_elems.push_back(add_new_instr(
HloInstruction::CreateConstant(LiteralUtil::Zero(elem_ty))));
return add_new_instr(HloInstruction::CreateTuple(tuple_elems));
};
std::unique_ptr<HloComputation> new_while_cond =
while_cond->CloneWithReplacementPairs({
while_cond->parameter_instruction(0),
convert_to_old_form(add_new_instr(HloInstruction::CreateParameter(
0, new_while_shape,
while_cond->parameter_instruction(0)->name()))),
});
HloComputation* temp_new_while_body =
module->AddEmbeddedComputation(while_body->CloneWithReplacementPairs({
while_body->parameter_instruction(0),
convert_to_old_form(add_new_instr(HloInstruction::CreateParameter(
0, new_while_shape,
while_body->parameter_instruction(0)->name()))),
}));
std::unique_ptr<HloComputation> new_while_body =
temp_new_while_body->CloneWithReplacementPairs({
temp_new_while_body->root_instruction(),
convert_to_new_form(
add_new_instr(temp_new_while_body->root_instruction()->Clone()),
Cast<HloParameterInstruction>(
temp_new_while_body->parameter_instruction(0))),
});
TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(temp_new_while_body));
new_instrs.clear();
auto* new_while = computation->AddInstruction(HloInstruction::CreateWhile(
new_while_shape,
module->AddEmbeddedComputation(std::move(new_while_cond)),
module->AddEmbeddedComputation(std::move(new_while_body)),
get_new_while_init(while_init)));
new_while->CopyBackendConfigFrom(while_op);
CopyFrontendAttributes(while_op, new_while);
CopyMetadata(while_op, new_while);
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
while_op, convert_to_old_form(new_while)));
for (auto& instr : new_instrs) {
computation->AddInstruction(std::move(instr));
}
return new_while;
}
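// Applies the rewrites above to each while op in the module: deduplicate
// repeated tuple elements, flatten nested tuples, drop dead and constant
// parameters, optionally fold trivial compares, propagate loop-invariant
// constants, and remove trip-count-0/1 loops (skipping loops containing
// send/recv or domain instructions), then merge induction variables;
// finally runs HloDCE over the module.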
absl::StatusOr<bool> WhileLoopSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(3,
"WhileLoopSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_ops;
for (auto* comp : module->computations(execution_threads)) {
for (auto* instr : comp->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
while_ops.push_back(instr);
}
}
}
for (HloInstruction* while_op : while_ops) {
TF_ASSIGN_OR_RETURN(bool result,
TryRemoveRepeatedWhileTupleIndices(while_op));
changed |= result;
if (result) {
continue;
}
TF_ASSIGN_OR_RETURN(result, TryFlattenNestedTuples(while_op));
changed |= result;
if (result) {
continue;
}
TF_ASSIGN_OR_RETURN(result, TryRemoveDeadWhileParams(while_op));
changed |= result;
if (result) {
continue;
}
TF_ASSIGN_OR_RETURN(result, TryRemoveConstantParams(while_op));
changed |= result;
if (result) {
continue;
}
if (simplify_compare_instrs_) {
TF_ASSIGN_OR_RETURN(result, TryRemoveTrivialCompare(while_op));
changed |= result;
if (result) {
continue;
}
}
if (ContainsInstrWithOpcode(while_op->while_body(),
{HloOpcode::kSend, HloOpcode::kSendDone,
HloOpcode::kRecv, HloOpcode::kRecvDone}) ||
ContainsInstrWithOpcode(while_op->while_condition(),
{HloOpcode::kSend, HloOpcode::kSendDone,
HloOpcode::kRecv, HloOpcode::kRecvDone})) {
VLOG(2) << "Not attempting to simplify while loop because it contains a "
"send/recv node: "
<< while_op->ToShortString();
continue;
}
TF_ASSIGN_OR_RETURN(result, TryPropagateConstant(while_op));
changed |= result;
TF_ASSIGN_OR_RETURN(result, TryRemoveWhileLoop(while_op));
changed |= result;
if (result) {
continue;
}
if (ContainsInstrWithOpcode(while_op->while_body(), {HloOpcode::kDomain}) ||
ContainsInstrWithOpcode(while_op->while_condition(),
{HloOpcode::kDomain})) {
continue;
}
bool merged_induction_vars = false;
for (auto elem_ty : {S8, U8, S32, U32, S64, U64}) {
TF_ASSIGN_OR_RETURN(auto* new_while_op,
TryMergeInductionVariables(while_op, elem_ty));
if (new_while_op) {
while_op = new_while_op;
changed = true;
merged_induction_vars = true;
}
}
if (merged_induction_vars) {
continue;
}
}
HloDCE dce;
TF_ASSIGN_OR_RETURN(bool dce_changed, dce.Run(module));
changed |= dce_changed;
XLA_VLOG_LINES(3,
"WhileLoopSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/while_loop_simplifier.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ::testing::_;
namespace op = xla::testing::opcode_matchers;
HloInstruction* FindFirstWhile(HloModule* m) {
const auto& instrs = m->entry_computation()->instructions();
return *absl::c_find_if(instrs, HloPredicateIsOp<HloOpcode::kWhile>);
}
class WhileLoopSimplifierTest : public HloTestBase {
protected:
[[nodiscard]] std::unique_ptr<VerifiedHloModule> MakeModuleWithSimpleLoop(
int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithSimpleLoopTupleElementLoopBound(int num_iters);
};
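// Builds a loop that counts an s32 from 42 up to 42 + num_iters, squaring a
// 3-element vector on every iteration.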
std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoop(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
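// Same loop, but the bound is carried as a third tuple element instead of
// being a literal inside the condition.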
std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoopTupleElementLoopBound(
int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoopWithIndirectLoopBound
SimpleLoopWithIndirectLoopBound.body {
loop_var.1 = (s32[], s32[3]{0}, s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
limit = s32[] get-tuple-element(loop_var.1), index=2
ROOT tuple = (s32[], s32[3]{0}, s32[]) tuple(add, multiply, limit)
}
SimpleLoopWithIndirectLoopBound.condition {
loop_var.2 = (s32[], s32[3]{0}, s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=2
ROOT less-than = pred[] compare(get-tuple-element.3, get-tuple-element.4), direction=LT
}
ENTRY SimpleLoopWithIndirectLoopBound {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
constant.2 = s32[] constant({{LOOP_BOUND}})
tuple.1 = (s32[], s32[3]{0}, s32[]) tuple(constant.3, constant.4,
constant.2)
ROOT while = (s32[], s32[3]{0}, s32[]) while(tuple.1),
condition=SimpleLoopWithIndirectLoopBound.condition,
body=SimpleLoopWithIndirectLoopBound.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
TEST_F(WhileLoopSimplifierTest, LoopWithZeroIterationSimplified) {
auto m = MakeModuleWithSimpleLoop(0);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest,
LoopWithZeroIterationTupleElementLoopBoundSimplified) {
auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(0);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant(), op::Constant(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, LoopWithOneIterationSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Add(), op::Multiply()));
}
TEST_F(WhileLoopSimplifierTest,
LoopWithOneIterationTupleElementLoopBoundSimplified) {
auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(1);
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Add(), op::Multiply(), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, LoopWithTwoIterationsNotSimplified) {
auto m = MakeModuleWithSimpleLoop(2);
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithControlDependencySimplifiedDependencyPreserved) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* true_op = while_op->while_body()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(true_op->AddControlDependencyTo(
while_op->while_body()->root_instruction()));
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_THAT(computation->root_instruction()->control_predecessors(),
ElementsAre(op::Constant()))
<< computation->ToString();
}
TEST_F(WhileLoopSimplifierTest, LoopWithSendNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* send = while_body->AddInstruction(HloInstruction::CreateSend(
while_body->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
token,
      /*channel_id=*/0));
while_body->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithRecvNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
auto* recv = while_body->AddInstruction(
HloInstruction::CreateRecv(ShapeUtil::MakeShape(F32, {1}), token,
                                 /*channel_id=*/0));
while_body->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto token = while_body->AddInstruction(HloInstruction::CreateToken());
while_body->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedInCondNotSimplified) {
auto m = MakeModuleWithSimpleLoop(1);
HloComputation* computation = m->entry_computation();
auto* while_op = computation->root_instruction();
ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_cond = while_op->while_condition();
auto token = while_cond->AddInstruction(HloInstruction::CreateToken());
while_cond->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, NonTupleShapedLoopNotSimplified) {
const std::string hlo_string = R"(
HloModule NonTupleShapedLoop
NonTupleShapedLoop.body {
loop_var.1 = s32[] parameter(0)
constant.1 = s32[] constant(-1)
ROOT add = s32[] add(s32[] loop_var.1, s32[] constant.1)
}
NonTupleShapedLoop.condition {
loop_var = s32[] parameter(0)
constant = s32[] constant(100)
ROOT less-than = pred[] compare(s32[] loop_var, s32[] constant), direction=LT
}
ENTRY INonTupleShapedLoop {
constant.2 = s32[] constant(42)
ROOT while = s32[] while(s32[] constant.2),
condition=NonTupleShapedLoop.condition,
body=NonTupleShapedLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopSwappingTupleElementsNotSimplified) {
const std::string hlo_string = R"(
HloModule SwappingTupleElements
SwappingTupleElements.body {
loop_var = (s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[]) loop_var),index=1
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[]) loop_var),
index=0
ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element,
s32[] get-tuple-element.1)
}
SwappingTupleElements.always_true {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY SwappingTupleElements {
x = s32[] parameter(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
condition=SwappingTupleElements.always_true,
body=SwappingTupleElements.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithUnusedButModifiedTupleElementNotSimplified) {
const std::string hlo_string = R"(
HloModule UnusedButModifiedTupleElement
UnusedButModifiedTupleElement.body {
loop_var = (s32[]) parameter(0)
constant.1 = s32[] constant(1)
ROOT tuple = (s32[]) tuple(s32[] constant.1)
}
UnusedButModifiedTupleElement.always_true {
param = (s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY UnusedButModifiedTupleElement {
constant.2 = s32[] constant(0)
tuple.1 = (s32[]) tuple(s32[] constant.2)
ROOT while = (s32[]) while((s32[]) tuple.1),
condition=UnusedButModifiedTupleElement.always_true,
body=UnusedButModifiedTupleElement.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithUnusedOutsideLoopButModifiedTupleElementSimplified) {
const std::string hlo_string = R"(
HloModule UnusedButModifiedTupleElement
UnusedButModifiedTupleElement.body {
loop_var = (s32[], s32[]) parameter(0)
constant.1 = s32[] constant(1)
ROOT tuple = (s32[], s32[]) tuple(s32[] constant.1, constant.1)
}
UnusedButModifiedTupleElement.cond {
param = (s32[], s32[]) parameter(0)
gte.cond = s32[] get-tuple-element(param), index=0
constant.3 = s32[] constant(1)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY UnusedButModifiedTupleElement {
constant.2 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.2)
while = (s32[], s32[]) while(tuple.1),
condition=UnusedButModifiedTupleElement.cond,
body=UnusedButModifiedTupleElement.body
ROOT gte = s32[] get-tuple-element(while), index=0
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
auto m_while = AllOf(op::While(), op::Shape("(s32[])"));
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::GetTupleElement(m_while));
}
TEST_F(WhileLoopSimplifierTest, LoopWithEmptyTupleNotSimplified) {
const std::string hlo_string = R"(
HloModule EmptyTuple
EmptyTuple.body {
loop_var = () parameter(0)
ROOT tuple = () tuple()
}
EmptyTuple.always_true {
param = () parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY EmptyTuple {
tuple.1 = () tuple()
ROOT while = () while(() tuple.1), condition=EmptyTuple.always_true,
body=EmptyTuple.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithElemUsedTwiceNotSimplified) {
const std::string hlo_string = R"(
HloModule ElemUsedTwice
ElemUsedTwice.body {
param0 = (s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[]) param0), index=0
ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element,
s32[] get-tuple-element)
}
ElemUsedTwice.always_true {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY ElemUsedTwice {
x = s32[] parameter(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
condition=ElemUsedTwice.always_true, body=ElemUsedTwice.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperands) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
RemoveUnusedOperands.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=0
get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=1
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[])
loop_var), index=2
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1,
s32[] add, s32[] get-tuple-element.3)
}
RemoveUnusedOperands.loop_condition {
constant.2 = s32[] constant(0)
param0 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0),
index=2
ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
constant.3 = s32[] constant(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3,
s32[] y)
ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
condition=RemoveUnusedOperands.loop_condition,
body=RemoveUnusedOperands.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
const auto& instrs = m->entry_computation()->instructions();
HloInstruction* new_while_op =
*absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return (instr->opcode() == HloOpcode::kWhile &&
instr->name() != "while");
});
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_TRUE(
ShapeUtil::Equal(new_while_op->shape(),
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32})))
<< ShapeUtil::HumanString(new_while_op->shape());
EXPECT_THAT(
new_while_op->while_body()->root_instruction(),
op::Tuple(
op::Add(op::GetTupleElement(op::Parameter(0), 0),
op::Constant()),
op::GetTupleElement(op::Parameter(0), 1)));
EXPECT_THAT(new_while_op->while_condition()->root_instruction(),
op::Eq(op::Constant(),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperandsCheckMetadata) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
RemoveUnusedOperands.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=0
get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=1
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[])
loop_var), index=2
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1,
s32[] add, s32[] get-tuple-element.3)
}
RemoveUnusedOperands.loop_condition {
constant.2 = s32[] constant(0)
param0 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0),
index=2
ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
constant.3 = s32[] constant(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3,
s32[] y)
ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
condition=RemoveUnusedOperands.loop_condition,
body=RemoveUnusedOperands.body, metadata={op_name="while"}
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
OpMetadata while_metadata;
while_metadata.set_op_name("while");
EXPECT_THAT(m->entry_computation()->root_instruction(),
AllOf(op::Tuple(), op::Metadata(while_metadata)));
EXPECT_THAT(m->entry_computation()->GetInstructionWithName("while.1"),
AllOf(op::While(), op::Metadata(while_metadata)));
}
TEST_F(WhileLoopSimplifierTest,
RemoveUnusedLoopOperandsDespiteSideEffectingOps) {
const std::string hlo_string = R"(
HloModule RemoveUnusedOperands
body {
loop_var = (s32[]) parameter(0)
gte0 = s32[] get-tuple-element(loop_var), index=0
token0 = token[] after-all()
unused = ((s32[], pred[]), token[]) infeed(token0)
ROOT tuple = (s32[]) tuple(gte0)
}
cond {
loop_var = (s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY RemoveUnusedOperands {
x = s32[] parameter(0)
tuple.1 = (s32[]) tuple(s32[] x)
ROOT while = (s32[]) while((s32[]) tuple.1),
condition=cond, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
const auto& instrs = m->entry_computation()->instructions();
HloInstruction* new_while_op =
*absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return (instr->opcode() == HloOpcode::kWhile &&
instr->name() != "while");
});
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(new_while_op->shape()))
<< new_while_op->shape().ToString();
}
TEST_F(WhileLoopSimplifierTest, LoopWithNonTupleBodyShapeNotSimplified) {
const std::string hlo_string = R"(
HloModule BodyHasNonTupleRoot
BodyHasNonTupleRoot.passthrough {
ROOT param = (s32[], s32[]) parameter(0)
}
BodyHasNonTupleRoot.always_true {
param.1 = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY BodyHasNonTupleRoot {
init_value = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while((s32[], s32[]) init_value),
condition=BodyHasNonTupleRoot.always_true,
body=BodyHasNonTupleRoot.passthrough
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest,
LoopWithNonTupleBodyRootInstructionNotSimplified) {
const std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT custom-call = (s32[], s32[3]{0}) custom-call(add, multiply),
custom_call_target="x"
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(44)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, LoopWithArrayConstantNotSimplified) {
const std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
add.2 = s32[3]{0} add(get-tuple-element.2, get-tuple-element.3)
ROOT tuple = (s32[], s32[3]{0}, s32[3]{0}) tuple(add, add.2, get-tuple-element.3)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(47)
ROOT less-than = pred[] compare(get-tuple-element.4, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}, s32[3]{0}) tuple(constant.3, constant.4, constant.4)
ROOT while = (s32[], s32[3]{0}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}
TEST_F(WhileLoopSimplifierTest, FlattenNestedTuple) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
ta = (s32[1]) get-tuple-element(param), index=0
a = s32[1] get-tuple-element(ta), index=0
a.1 = s32[1] add(a, a)
tbcd = (s32[2], s32[3], (s32[4])) get-tuple-element(param), index=1
ROOT tuple = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
}
Cond {
param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
b = s32[2] constant({0,1})
c = s32[3] constant({0,1,2})
d = s32[4] constant({0,1,2,3})
ta = (s32[1]) tuple(a)
td = (s32[4]) tuple(d)
tbcd = (s32[2], s32[3], (s32[4])) tuple(b, c, td)
init = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
ROOT while = ((s32[1]), (s32[2], s32[3], (s32[4]))) while(init),
condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape flat_tuple = ParseShape("(s32[1], s32[2], s32[3], s32[4])").value();
SCOPED_TRACE(m->ToString());
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(), flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
flat_tuple));
EXPECT_TRUE(ShapeUtil::Equal(
m->entry_computation()->root_instruction()->shape(),
ParseShape("((s32[1]), (s32[2], s32[3], (s32[4])))").value()));
}
TEST_F(WhileLoopSimplifierTest, OnlyConstantsInLoopCarry) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = (s32[1]) parameter(0)
a = s32[1] constant({0})
ROOT tuple = (s32[1]) tuple(a)
}
Cond {
param = (s32[1]) parameter(0)
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
init = (s32[1]) tuple(a)
ROOT while = (s32[1]) while(init), condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, RemoveConstantFromLoopCarry) {
const std::string hlo_string = R"(
HloModule Test
Body {
param = (s32[1], s32[2], s32[3]) parameter(0)
a = s32[1] get-tuple-element(param), index=0
a.1 = s32[1] add(a, a)
b = s32[2] constant({1,1})
c = s32[3] constant({10,10,10})
ROOT tuple = (s32[1], s32[2], s32[3]) tuple(a.1, b, c)
}
Cond {
param = (s32[1], s32[2], s32[3]) parameter(0)
a = s32[1] get-tuple-element(param), index=0
b = s32[2] get-tuple-element(param), index=1
c = s32[3] get-tuple-element(param), index=2
ROOT cond = pred[] constant(true)
}
ENTRY Loop {
a = s32[1] constant({0})
b = s32[2] constant({1,1})
c = s32[3] constant({2,2,2})
init = (s32[1], s32[2], s32[3]) tuple(a,b,c)
ROOT while = (s32[1], s32[2], s32[3]) while(init),
condition=Cond, body=Body
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape new_while_shape = ParseShape("(s32[1], s32[3])").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(
ShapeUtil::Equal(m->entry_computation()->root_instruction()->shape(),
ParseShape("(s32[1], s32[2], s32[3])").value()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::Tuple(_, op::Constant(), _));
}
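// Template for the induction-variable merging tests below. "TYPE" is
// substituted with a concrete primitive type (e.g. s32) before parsing, so
// the same module can be reused across element types.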
const char* const kSimpleMergeInductionVariablesModule = R"(
HloModule Test
Body {
param = (TYPE[], TYPE[], TYPE[]) parameter(0)
a = TYPE[] get-tuple-element(param), index=0
one = TYPE[] constant(1)
a1 = TYPE[] add(a, one)
b = TYPE[] get-tuple-element(param), index=1
negone = TYPE[] constant(-1)
b1 = TYPE[] add(b, negone)
c = TYPE[] add(a, b)
ROOT tuple = (TYPE[], TYPE[], TYPE[]) tuple(a1,b1,c)
}
Cond {
param = (TYPE[], TYPE[], TYPE[]) parameter(0)
a = TYPE[] get-tuple-element(param), index=0
b = TYPE[] get-tuple-element(param), index=1
sum = TYPE[] power(a, b)
ten = TYPE[] constant(10)
ROOT cond = pred[] compare(sum, ten), direction=LT
}
ENTRY Loop {
a = TYPE[] constant(10)
b = TYPE[] constant(100)
c = TYPE[] constant(0)
init = (TYPE[], TYPE[], TYPE[]) tuple(a,b,c)
while = (TYPE[], TYPE[], TYPE[]) while(init), condition=Cond, body=Body
a1 = TYPE[] get-tuple-element(while), index=0
b1 = TYPE[] get-tuple-element(while), index=1
c1 = TYPE[] get-tuple-element(while), index=2
sum = TYPE[] add(a1, b1)
ROOT sum.1 = TYPE[] add(sum, c1)
})";
TEST_F(WhileLoopSimplifierTest, MergeInductionVariables_Simple) {
std::string hlo_string = absl::StrReplaceAll(
kSimpleMergeInductionVariablesModule, {{"TYPE", "s32"}});
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
HloInstruction* new_while = FindFirstWhile(m.get());
SCOPED_TRACE(m->ToString());
Shape new_while_shape = ParseShape("(s32[], s32[], s32[], s32[])").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_THAT(new_while->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(), 0),
op::GetTupleElement(op::Parameter(), 1), op::Add(),
op::Add(op::GetTupleElement(op::Parameter(), 3),
op::Constant())));
EXPECT_THAT(new_while->while_condition()->root_instruction(),
op::Lt(op::Power(op::Add(), op::Add()), op::Constant()));
}
TEST_F(WhileLoopSimplifierTest, MergeInductionVariables_SkipS16) {
std::string hlo_string = absl::StrReplaceAll(
kSimpleMergeInductionVariablesModule, {{"TYPE", "s16"}});
EXPECT_FALSE(WhileLoopSimplifier()
.Run(ParseAndReturnVerifiedModule(hlo_string).value().get())
.value());
}
TEST_F(WhileLoopSimplifierTest, RemoveRepeatedParams) {
const std::string hlo_string = R"(
HloModule SwappingTupleElements
SwappingTupleElements.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element(loop_var), index=0
get-tuple-element.1 = s32[] get-tuple-element(loop_var), index=1
get-tuple-element.2 = s32[] get-tuple-element(loop_var), index=2
y = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element, y,
s32[] get-tuple-element.2)
}
SwappingTupleElements.always_true {
param = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element(param), index=0
get-tuple-element.1 = s32[] get-tuple-element(param), index=1
ROOT less-than = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT
}
ENTRY SwappingTupleElements {
x = s32[] parameter(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] y, s32[] x)
ROOT while = (s32[], s32[], s32[]) while(tuple.1),
condition=SwappingTupleElements.always_true,
body=SwappingTupleElements.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape new_while_shape = ParseShape("(s32[], s32[])").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
}
TEST_F(WhileLoopSimplifierTest, LoopWithUnusedGroupSimplified) {
const std::string hlo_string = R"(
HloModule LoopWithUnusedGroupSimplified
LoopWithUnusedGroupSimplified.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=1
gte1 = s32[] get-tuple-element(loop_var), index=2
add = s32[] add(gte0, gte1)
ROOT tuple = (s32[], s32[], s32[]) tuple(constant.1, add, add)
}
LoopWithUnusedGroupSimplified.cond {
param = (s32[], s32[], s32[]) parameter(0)
gte.cond = s32[] get-tuple-element(param), index=0
constant.3 = s32[] constant(1)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY LoopWithUnusedGroupSimplified {
constant.2 = s32[] constant(0)
tuple.1 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2)
while = (s32[], s32[], s32[]) while(tuple.1),
condition=LoopWithUnusedGroupSimplified.cond,
body=LoopWithUnusedGroupSimplified.body
ROOT gte = s32[] get-tuple-element(while), index=0
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
auto m_while = AllOf(op::While(), op::Shape("(s32[])"));
EXPECT_THAT(m->entry_computation()->root_instruction(),
op::GetTupleElement(m_while));
}
TEST_F(WhileLoopSimplifierTest, LoopWithUnusedNonPassthroughElementSimplified) {
const std::string hlo_string = R"(
HloModule LoopWithUnusedNonPassthroughElementSimplified
LoopWithUnusedNonPassthroughElementSimplified.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=1
gte1 = s32[] get-tuple-element(loop_var), index=2
add = s32[] add(gte0, gte1)
add2 = s32[] add(gte0, gte0)
ROOT tuple = (s32[], s32[], s32[]) tuple(constant.1, add2, add)
}
LoopWithUnusedNonPassthroughElementSimplified.cond {
param = (s32[], s32[], s32[]) parameter(0)
gte.cond = s32[] get-tuple-element(param), index=0
constant.3 = s32[] constant(1)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY LoopWithUnusedNonPassthroughElementSimplified {
constant.2 = s32[] constant(0)
tuple.1 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2)
while = (s32[], s32[], s32[]) while(tuple.1),
condition=LoopWithUnusedNonPassthroughElementSimplified.cond,
body=LoopWithUnusedNonPassthroughElementSimplified.body
gte2 = s32[] get-tuple-element(while), index=0
gte3 = s32[] get-tuple-element(while), index=1
ROOT tuple.2 = (s32[], s32[]) tuple(gte2, gte3)
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
EXPECT_TRUE(HloDCE().Run(m.get()).ok());
EXPECT_THAT(m->entry_computation()->root_instruction(),
AllOf(op::While(), op::Shape("(s32[], s32[])")));
}
TEST_F(WhileLoopSimplifierTest, RemoveUnusedParamsDespiteSendRecv) {
const std::string hlo_string = R"(
HloModule RemoveUnusedParamsDespiteSendRecv
RemoveUnusedParamsDespiteSendRecv.body {
loop_var = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=0
get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[],
s32[]) loop_var), index=1
constant.1 = s32[] constant(1)
token.1 = token[] after-all()
send.1 = (s32[], u32[], token[]) send(constant.1, token.1), channel_id=42, is_host_transfer=true
send-done.1 = token[] send-done(send.1), channel_id=42, is_host_transfer=true
recv.1 = (s32[], u32[], token[]) recv(send-done.1), channel_id=43, is_host_transfer=true
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
recv-done.1 = (s32[], token[]) recv-done(recv.1), channel_id=43, is_host_transfer=true
get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[])
loop_var), index=2
ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1,
s32[] add, s32[] get-tuple-element.3)
}
RemoveUnusedParamsDespiteSendRecv.loop_condition {
constant.2 = s32[] constant(0)
param0 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0),
index=2
ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
}
ENTRY RemoveUnusedParamsDespiteSendRecv {
x = s32[] parameter(0)
constant.3 = s32[] constant(0)
y = s32[] parameter(1)
tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3,
s32[] y)
ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
condition=RemoveUnusedParamsDespiteSendRecv.loop_condition,
body=RemoveUnusedParamsDespiteSendRecv.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape new_while_shape = ParseShape("(s32[], s32[])").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
}
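// Verifies that a compare of the induction variable against a constant whose
// outcome is invariant over the loop's iteration range is folded to a
// constant predicate inside the body. Both LT and GT directions and several
// out-of-range constants are exercised.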
TEST_F(WhileLoopSimplifierTest, RemoveTrivialCompare) {
const std::string hlo_template = R"(
HloModule RemoveTrivialCompare
RemoveTrivialCompare.body {
loop_var = (pred[], s32[]) parameter(0)
get-tuple-element.2 = s32[] get-tuple-element((pred[], s32[]) loop_var), index=1
cons = s32[] constant({{LOOP_CONSTANT}})
comp = pred[] compare(get-tuple-element.2, cons), direction={{DIRECTION}}
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
ROOT tuple = (pred[], s32[]) tuple(comp,
s32[] add)
}
RemoveTrivialCompare.loop_condition {
constant.2 = s32[] constant(10)
param0 = (pred[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((pred[], s32[]) param0),
index=1
ROOT equal-to = pred[] compare(s32[] get-tuple-element, s32[] constant.2), direction=LT
}
ENTRY RemoveTrivialCompare {
constant.3 = s32[] constant(1)
t = pred[] constant(true)
tuple.1 = (pred[], s32[]) tuple(t, s32[] constant.3)
ROOT while = (pred[], s32[]) while((pred[], s32[]) tuple.1),
condition=RemoveTrivialCompare.loop_condition,
body=RemoveTrivialCompare.body
}
)";
for (std::string dir : {"LT", "GT"}) {
for (int i = 1; i > -5; i--) {
std::string hlo_string = absl::StrReplaceAll(
hlo_template,
{{"{{LOOP_CONSTANT}}", absl::StrCat(i)}, {"{{DIRECTION}}", dir}});
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
      EXPECT_TRUE(WhileLoopSimplifier(/*simplify_compare_instrs=*/true)
                      .Run(m.get())
                      .value());
HloInstruction* while_instr = FindFirstWhile(m.get());
EXPECT_THAT(while_instr->while_body()->root_instruction(),
op::Tuple(op::Constant(), _));
EXPECT_TRUE(while_instr->while_body()
->root_instruction()
->operand(0)
->literal()
.IsAll(dir == "GT"));
}
for (int i = 11; i < 15; i++) {
std::string hlo_string = absl::StrReplaceAll(
hlo_template,
{{"{{LOOP_CONSTANT}}", absl::StrCat(i)}, {"{{DIRECTION}}", dir}});
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
      EXPECT_TRUE(WhileLoopSimplifier(/*simplify_compare_instrs=*/true)
                      .Run(m.get())
                      .value());
HloInstruction* while_instr = FindFirstWhile(m.get());
EXPECT_THAT(while_instr->while_body()->root_instruction(),
op::Tuple(op::Constant(), _));
EXPECT_TRUE(while_instr->while_body()
->root_instruction()
->operand(0)
->literal()
.IsAll(dir == "LT"));
}
}
}
TEST_F(WhileLoopSimplifierTest, NotRemoveCompare) {
const std::string hlo_string = R"(
HloModule RemoveTrivialCompare
RemoveTrivialCompare.body {
loop_var = (pred[], s32[]) parameter(0)
get-tuple-element.2 = s32[] get-tuple-element((pred[], s32[]) loop_var), index=1
five = s32[] constant(5)
comp = pred[] compare(get-tuple-element.2, five), direction=LT
constant.1 = s32[] constant(1)
add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
ROOT tuple = (pred[], s32[]) tuple(comp,
s32[] add)
}
RemoveTrivialCompare.loop_condition {
constant.2 = s32[] constant(10)
param0 = (pred[], s32[]) parameter(0)
get-tuple-element = s32[] get-tuple-element((pred[], s32[]) param0),
index=1
ROOT equal-to = pred[] compare(s32[] get-tuple-element, s32[] constant.2), direction=LT
}
ENTRY RemoveTrivialCompare {
constant.3 = s32[] constant(0)
t = pred[] constant(true)
tuple.1 = (pred[], s32[]) tuple(t, s32[] constant.3)
ROOT while = (pred[], s32[]) while((pred[], s32[]) tuple.1),
condition=RemoveTrivialCompare.loop_condition,
body=RemoveTrivialCompare.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier(/*simplify_compare_instrs=*/true)
                   .Run(m.get())
                   .value());
}
TEST_F(WhileLoopSimplifierTest, RemoveDynUpdSlice) {
const std::string hlo_string = R"(
HloModule jit_scan
%region_0.6 (arg_tuple.7: (s32[], f32[], f32[3], f32[3])) -> (s32[], f32[], f32[3], f32[3]) {
%arg_tuple.7 = (s32[], f32[], f32[3]{0}, f32[3]{0}) parameter(0)
%get-tuple-element.8 = s32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=0
%constant.12 = s32[] constant(1)
%add.28 = s32[] add(s32[] %get-tuple-element.8, s32[] %constant.12)
%get-tuple-element.9 = f32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=1
%sine.15 = f32[] sine(f32[] %get-tuple-element.9)
%get-tuple-element.10 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=2
%cosine.16 = f32[] cosine(f32[] %get-tuple-element.9)
%reshape.18 = f32[1]{0} reshape(f32[] %cosine.16)
%constant.14 = s32[] constant(0)
%compare.19 = pred[] compare(s32[] %get-tuple-element.8, s32[] %constant.14), direction=LT
%constant.13 = s32[] constant(3)
%add.20 = s32[] add(s32[] %get-tuple-element.8, s32[] %constant.13)
%select.21 = s32[] select(pred[] %compare.19, s32[] %add.20, s32[] %get-tuple-element.8)
%dynamic-update-slice.22 = f32[3]{0} dynamic-update-slice(f32[3]{0} %get-tuple-element.10, f32[1]{0} %reshape.18, s32[] %select.21)
%get-tuple-element.11 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=3
%dynamic-update-slice.27 = f32[3]{0} dynamic-update-slice(f32[3]{0} %get-tuple-element.11, f32[1]{0} %reshape.18, s32[] %select.21)
ROOT %tuple.29 = (s32[], f32[], f32[3]{0}, f32[3]{0}) tuple(s32[] %add.28, f32[] %sine.15, f32[3]{0} %dynamic-update-slice.22, f32[3]{0} %dynamic-update-slice.27)
}
%region_1.30 (arg_tuple.31: (s32[], f32[], f32[3], f32[3])) -> pred[] {
%arg_tuple.31 = (s32[], f32[], f32[3]{0}, f32[3]{0}) parameter(0)
%get-tuple-element.32 = s32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.31), index=0
%constant.36 = s32[] constant(3)
ROOT %compare.37 = pred[] compare(s32[] %get-tuple-element.32, s32[] %constant.36), direction=LT
}
ENTRY %main.44 (Arg_0.1: f32[]) -> (f32[], f32[3], f32[3]) {
%constant.4 = s32[] constant(0)
%Arg_0.1 = f32[] parameter(0), sharding={replicated}
%constant.2 = f32[] constant(0)
%broadcast.3 = f32[3]{0} broadcast(f32[] %constant.2), dimensions={}
%tuple.5 = (s32[], f32[], f32[3]{0}, f32[3]{0}) tuple(s32[] %constant.4, f32[] %Arg_0.1, f32[3]{0} %broadcast.3, f32[3]{0} %broadcast.3)
%while.38 = (s32[], f32[], f32[3]{0}, f32[3]{0}) while((s32[], f32[], f32[3]{0}, f32[3]{0}) %tuple.5), condition=%region_1.30, body=%region_0.6
%get-tuple-element.40 = f32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %while.38), index=1
%get-tuple-element.41 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %while.38), index=2
%get-tuple-element.42 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %while.38), index=3
ROOT %tuple.43 = (f32[], f32[3]{0}, f32[3]{0}) tuple(f32[] %get-tuple-element.40, f32[3]{0} %get-tuple-element.41, f32[3]{0} %get-tuple-element.42)
})";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
HloInstruction* new_while = FindFirstWhile(m.get());
Shape new_while_shape = ParseShape("(s32[], f32[], f32[3]{0})").value();
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->root_instruction()->shape(), new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_body()->parameter_instruction(0)->shape(),
new_while_shape));
EXPECT_TRUE(ShapeUtil::Equal(
new_while->while_condition()->parameter_instruction(0)->shape(),
new_while_shape));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
740c6a4b-47a4-47dd-979f-37886ea081a6 | cpp | tensorflow/tensorflow | shaped_buffer | third_party/xla/xla/service/shaped_buffer.cc | third_party/xla/xla/service/shaped_buffer_test.cc | #include "xla/service/shaped_buffer.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
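// A ShapedBuffer holds one DeviceMemoryBase per leaf of its on-device shape;
// the host shape is derived from the device shape via
// ShapeUtil::DeviceShapeToHostShape rather than stored independently.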
ShapedBuffer::ShapedBuffer(Shape on_device_shape, int device_ordinal,
int physical_device_ordinal)
: on_device_shape_(std::move(on_device_shape)),
device_ordinal_(device_ordinal),
buffers_(&on_device_shape_) {
physical_device_ordinal_ =
physical_device_ordinal == -1 ? device_ordinal_ : physical_device_ordinal;
on_host_shape_ = ShapeUtil::DeviceShapeToHostShape(on_device_shape_);
}
ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape,
int device_ordinal, int physical_device_ordinal)
: ShapedBuffer(on_device_shape, device_ordinal, physical_device_ordinal) {}
ShapedBuffer::ShapedBuffer(ShapedBuffer&& s) noexcept
: on_host_shape_(std::move(s.on_host_shape_)),
on_device_shape_(std::move(s.on_device_shape_)),
device_ordinal_(s.device_ordinal_),
physical_device_ordinal_(s.physical_device_ordinal_),
buffers_(std::move(s.buffers_)) {
buffers_.replace_shape_ptr(on_device_shape_);
}
ShapedBuffer& ShapedBuffer::operator=(ShapedBuffer&& s) noexcept {
on_device_shape_ = std::move(s.on_device_shape_);
on_host_shape_ = std::move(s.on_host_shape_);
device_ordinal_ = s.device_ordinal_;
physical_device_ordinal_ = s.physical_device_ordinal_;
buffers_ = std::move(s.buffers_);
buffers_.replace_shape_ptr(on_device_shape_);
return *this;
}
ShapedBuffer::~ShapedBuffer() {}
absl::StatusOr<ShapedBuffer> ShapedBuffer::SubShapedBuffer(
const ShapeIndex& index) const {
TF_ASSIGN_OR_RETURN(const Shape* device_sub_shape,
ShapeUtil::TryGetSubshape(on_device_shape(), index));
ShapedBuffer sub_shaped_buffer(*device_sub_shape, device_ordinal_,
physical_device_ordinal_);
TF_ASSIGN_OR_RETURN(ShapeTree<se::DeviceMemoryBase> sub_buffers,
buffers_.SubShapeTree(index));
sub_shaped_buffer.set_buffers(std::move(sub_buffers));
return std::move(sub_shaped_buffer);
}
void ShapedBuffer::clear() {
for (auto& pair : buffers_) {
pair.second = se::DeviceMemoryBase();
}
}
std::string ShapedBuffer::ToString() const {
std::string s =
absl::StrCat("ShapedBuffer(", device_ordinal(),
"), on-device shape=" +
ShapeUtil::HumanStringWithLayout(on_device_shape()),
":\n");
ShapeUtil::ForEachSubshape(
on_device_shape(),
[this, &s](const Shape& subshape, const ShapeIndex& index) {
std::string shape_str;
if (subshape.IsTuple()) {
shape_str = "tuple";
} else {
shape_str = ShapeUtil::HumanStringWithLayout(subshape);
}
const se::DeviceMemoryBase& memory = buffer(index);
absl::StrAppendFormat(&s, " %s%p (%d bytes) : %s\n",
std::string(index.size() * 2, ' '),
memory.opaque(), memory.size(), shape_str);
});
return s;
}
std::ostream& operator<<(std::ostream& out, const ShapedBuffer& buffer) {
out << buffer.ToString();
return out;
}
ScopedShapedBuffer::ScopedShapedBuffer(Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal,
int physical_device_ordinal)
: ShapedBuffer(std::move(on_device_shape), device_ordinal,
physical_device_ordinal),
allocator_(allocator) {}
ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape,
Shape on_device_shape,
se::DeviceMemoryAllocator* allocator,
int device_ordinal,
int physical_device_ordinal)
: ScopedShapedBuffer(std::move(on_device_shape), allocator, device_ordinal,
physical_device_ordinal) {}
ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer,
se::DeviceMemoryAllocator* allocator)
: ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {}
ScopedShapedBuffer::ScopedShapedBuffer(ScopedShapedBuffer&& s) noexcept
: ShapedBuffer(static_cast<ShapedBuffer&&>(s)), allocator_(s.allocator_) {
s.allocator_ = nullptr;
}
ScopedShapedBuffer& ScopedShapedBuffer::operator=(
ScopedShapedBuffer&& s) noexcept {
Deallocate();
*static_cast<ShapedBuffer*>(this) = std::move(static_cast<ShapedBuffer&>(s));
allocator_ = s.allocator_;
s.allocator_ = nullptr;
return *this;
}
ScopedShapedBuffer::~ScopedShapedBuffer() { Deallocate(); }
ShapedBuffer ScopedShapedBuffer::release() {
ShapedBuffer shaped_buffer(static_cast<ShapedBuffer&&>(*this));
buffers_ = ShapeTree<se::DeviceMemoryBase>();
return shaped_buffer;
}
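// Frees every distinct buffer exactly once. Different shape indices may alias
// the same allocation, so opaque pointers are deduplicated before calling
// into the allocator.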
void ScopedShapedBuffer::Deallocate() {
if (allocator_ == nullptr) {
return;
}
absl::flat_hash_set<void*> deallocated_ptrs;
for (auto& pair : buffers_) {
se::DeviceMemoryBase& memory_base = pair.second;
if (!memory_base.is_null() &&
deallocated_ptrs.insert(memory_base.opaque()).second) {
TF_CHECK_OK(allocator_->Deallocate(device_ordinal(), memory_base));
}
}
}
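// Moves ownership of the buffers under `index` into a new ScopedShapedBuffer.
// The source entries are overwritten with null DeviceMemoryBase values so the
// original buffer no longer deallocates them.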
ScopedShapedBuffer ScopedShapedBuffer::TakeSubTree(ShapeIndexView index) {
const xla::Shape& sub_on_device_shape =
xla::ShapeUtil::GetSubshape(on_device_shape(), {index});
ScopedShapedBuffer output(sub_on_device_shape, memory_allocator(),
device_ordinal(), physical_device_ordinal());
auto src_it = buffers().find(index);
auto dst_it = output.buffers().begin();
while (dst_it != output.buffers().end()) {
dst_it->second = src_it->second;
src_it->second = tensorflow::se::DeviceMemoryBase(nullptr, 0);
++src_it;
++dst_it;
}
return output;
}
} | #include "xla/service/shaped_buffer.h"
#include <memory>
#include <utility>
#include <vector>
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
TEST(ShapedBufferTest, ScopedShapeBufferAsShapedBufferB71629047) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(auto executors,
xla::PlatformUtil::GetStreamExecutors(platform));
xla::se::StreamExecutorMemoryAllocator allocator(platform, executors);
const xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {});
const int kDeviceOrdinal = 0;
auto scoped_buffer = std::make_unique<xla::ScopedShapedBuffer>(
shape, shape, &allocator, kDeviceOrdinal);
std::unique_ptr<xla::ShapedBuffer> buffer = std::move(scoped_buffer);
buffer = nullptr;
}
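// A malloc-backed allocator that records every live allocation. The
// destructor fails the test if anything was leaked, and Deallocate flags
// double frees and unknown pointers.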
class TestAllocator : public se::DeviceMemoryAllocator {
public:
TestAllocator()
: se::DeviceMemoryAllocator(PlatformUtil::GetDefaultPlatform().value()) {}
~TestAllocator() override {
if (!allocations_.empty()) {
ADD_FAILURE() << "Some allocations not freed!";
}
}
using se::DeviceMemoryAllocator::Allocate;
absl::StatusOr<se::OwningDeviceMemory> Allocate(
      int device_ordinal, uint64_t size, bool /*retry_on_failure*/,
      int64_t /*memory_space*/) override {
if (size == 0) {
return se::OwningDeviceMemory();
}
void* buf = malloc(size);
allocations_.insert({device_ordinal, buf});
return se::OwningDeviceMemory(se::DeviceMemoryBase(buf, size),
device_ordinal, this);
}
absl::Status Deallocate(int device_ordinal,
se::DeviceMemoryBase mem) override {
if (mem.is_null()) {
return absl::OkStatus();
}
auto it = allocations_.find({device_ordinal, mem.opaque()});
if (it == allocations_.end()) {
ADD_FAILURE() << "Allocation not found (double free?)";
} else {
free(mem.opaque());
allocations_.erase(it);
}
return absl::OkStatus();
}
bool AllowsAsynchronousDeallocation() const override { return false; }
absl::StatusOr<se::Stream*> GetStream(int device_ordinal) override {
LOG(FATAL) << "Not implemented";
}
private:
  std::set<std::pair<int64_t, void*>> allocations_;
};
TEST(ScopedShapedBufferTest, TestMoveAssignmentOperator) {
Shape s = ShapeUtil::MakeShape(F32, {1});
TestAllocator allocator;
ScopedShapedBuffer sb1(s, &allocator, 0);
  sb1.set_buffer(allocator.Allocate(0, 42).value(), {});
ScopedShapedBuffer sb2(s, &allocator, 1);
  sb2.set_buffer(allocator.Allocate(1, 10).value(), {});
sb1 = std::move(sb2);
}
TEST(ScopedShapedBufferTest, TestTakeSubTree) {
TestAllocator allocator;
Shape s = ShapeUtil::MakeShape(F32, {1});
s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(2, s));
s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(3, s));
ScopedShapedBuffer sb(s, &allocator, 0);
sb.buffers().ForEachMutableElement(
[&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory m,
allocator.Allocate(0, 77));
*buffer = m.Release();
});
ShapeTree<se::DeviceMemoryBase> buffers = sb.buffers();
xla::ShapeIndex subtree_index = {1};
ScopedShapedBuffer output = sb.TakeSubTree(subtree_index);
output.buffers().ForEachElement([&](const xla::ShapeIndex& sub_index,
const se::DeviceMemoryBase& buffer) {
xla::ShapeIndex orig_index = subtree_index;
for (int i : sub_index) {
orig_index.push_back(i);
}
EXPECT_TRUE(buffers.find(orig_index)->second.IsSameAs(buffer));
});
sb.buffers().ForEachElement([&](const xla::ShapeIndex& index,
const se::DeviceMemoryBase& buffer) {
if ((index.size() >= subtree_index.size()) &&
ShapeIndexView(index).first(subtree_index.size()) == subtree_index) {
EXPECT_TRUE(buffer.is_null());
} else {
EXPECT_TRUE(buffers.find(index)->second.IsSameAs(buffer));
}
});
}
TEST(ScopedShapedBufferTest, TestSubShapeTree) {
Shape array_shape = ShapeUtil::MakeShape(F32, {1});
Shape tuple_shape =
xla::ShapeUtil::MakeTupleShape({array_shape, array_shape});
TestAllocator allocator;
ScopedShapedBuffer sb(tuple_shape, &allocator, 0);
sb.buffers().ForEachMutableElement(
[&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory m,
allocator.Allocate(0, 32));
*buffer = m.Release();
});
auto ssb_statusor = sb.SubShapedBuffer({1});
ASSERT_TRUE(ssb_statusor.ok());
auto ssb = std::move(ssb_statusor).value();
EXPECT_EQ(ssb.on_host_shape(), array_shape);
EXPECT_EQ(ssb.on_device_shape(), array_shape);
}
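// Benchmarks TakeSubTree on a nested tuple shape: state.range(0) is the
// nesting depth and state.range(1) the tuple fan-out at each level.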
void BM_TakeSubTree(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
TestAllocator allocator;
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = xla::ShapeUtil::MakeTupleShape(shapes);
}
  xla::ScopedShapedBuffer shaped_buffer(shape, &allocator, 0);
for (auto s : state) {
(void)shaped_buffer.TakeSubTree({fan_out / 2}).release();
}
}
BENCHMARK(BM_TakeSubTree)
->ArgPair(1, 4)
->ArgPair(1, 8)
->ArgPair(1, 32)
->ArgPair(1, 64)
->ArgPair(1, 128)
->ArgPair(1, 256)
->ArgPair(1, 512)
->ArgPair(2, 4)
->ArgPair(2, 8)
->ArgPair(2, 32)
->ArgPair(2, 64)
->ArgPair(2, 128);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/shaped_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/shaped_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9c06777-8747-4fb4-8b7a-eda275f5c353 | cpp | tensorflow/tensorflow | hlo_graph_dumper | third_party/xla/xla/service/hlo_graph_dumper.cc | third_party/xla/xla/service/hlo_graph_dumper_test.cc | #include "xla/service/hlo_graph_dumper.h"
#include <cstdint>
#include <unordered_map>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/shape.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/lib/io/zlib_compression_options.h"
#include "xla/tsl/lib/io/zlib_outputbuffer.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
enum NodeFilterResult {
kNormalNode,
kHideNode,
kHighlightNode,
kSomeOperandsOmitted,
kOmitNodeOperands,
kSomeUsersOmitted,
};
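// Wraps a predicate from HloInstruction to NodeFilterResult and exposes
// convenience queries (Show, Highlight, Deemphasized, ...) used when deciding
// how each node is rendered.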
class NodeFilter {
public:
NodeFilter() : filter_([](const HloInstruction*) { return kNormalNode; }) {}
explicit NodeFilter(
std::function<NodeFilterResult(const HloInstruction* instr)> filter,
std::optional<int> num_rendered = std::nullopt)
: filter_(std::move(filter)), num_rendered_(num_rendered) {}
bool Show(const HloInstruction* instr) const {
return filter_(instr) != kHideNode;
}
bool Highlight(const HloInstruction* instr) const {
return filter_(instr) == kHighlightNode;
}
bool OmitOperands(const HloInstruction* instr) const {
return filter_(instr) == kOmitNodeOperands;
}
bool SomeOrAllOperandsOmitted(const HloInstruction* instr) const {
auto result = filter_(instr);
return result == kOmitNodeOperands || result == kSomeOperandsOmitted;
}
bool Deemphasized(const HloInstruction* instr) const {
auto result = filter_(instr);
return result == kOmitNodeOperands || result == kSomeOperandsOmitted ||
result == kSomeUsersOmitted;
}
std::optional<int> GetNumRendered() const { return num_rendered_; }
private:
std::function<NodeFilterResult(const HloInstruction* instr)> filter_;
std::optional<int> num_rendered_;
};
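// An instruction is "small" if its shape holds fewer than 4096 elements
// (counted recursively through tuples); opaque and token types are always
// considered small.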
bool IsSmall(const HloInstruction* instr) {
if (ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE_TYPE) ||
ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) {
return true;
}
return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096;
}
enum ColorScheme {
kBlue,
kBrown,
kDarkBlue,
kDarkGreen,
kDarkOrange,
kDarkRed,
kGray,
kGreen,
kOrange,
kPurple,
kRed,
kWhite,
kYellow,
kDashedBorder,
};
struct NodeColors {
std::string style;
std::string fill_color;
std::string stroke_color;
std::string font_color;
};
NodeColors NodeColorsForScheme(ColorScheme color) {
switch (color) {
case kBlue:
return NodeColors{"filled", "#bbdefb", "#8aacc8", "black"};
case kBrown:
return NodeColors{"filled", "#bcaaa4", "#8c7b75", "black"};
case kDarkBlue:
return NodeColors{"filled", "#1565c0", "#003c8f", "white"};
case kDarkGreen:
return NodeColors{"filled", "#2e7d32", "#005005", "white"};
case kDarkOrange:
return NodeColors{"filled", "#ffb74d", "#c88719", "black"};
case kDarkRed:
return NodeColors{"filled", "#b71c1c", "#7f0000", "white"};
case kGray:
return NodeColors{"filled", "#cfd8dc", "#9ea7aa", "black"};
case kGreen:
return NodeColors{"filled", "#c8e6c9", "#97b498", "black"};
case kOrange:
return NodeColors{"filled", "#ffe0b2", "#cbae82", "black"};
case kPurple:
return NodeColors{"filled", "#e1bee7", "#af8eb5", "black"};
case kRed:
return NodeColors{"filled", "#ffcdd2", "#cb9ca1", "black"};
case kWhite:
return NodeColors{"filled", "white", "#9e9e9e", "black"};
case kYellow:
return NodeColors{"filled", "#fff9c4", "#cbc693", "black"};
case kDashedBorder:
return NodeColors{"filled,dashed", "white", "#757575", "#757575"};
}
}
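// Maps a statistic value in [0, 100] onto a light-gray-to-red gradient in
// ten-point buckets; higher values get progressively more saturated fills.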
std::string NodeFillColorForStatistic(const Statistic& statistic) {
auto stat_val = statistic.stat_val();
if (stat_val == 0) {
return "#f5f5f5";
} else if (stat_val < 10) {
return "#f7d4cc";
} else if (stat_val < 20) {
return "#f8b2a3";
} else if (stat_val < 30) {
return "#f9a28f";
} else if (stat_val < 40) {
return "#fa917b";
} else if (stat_val < 50) {
return "#fb8066";
} else if (stat_val < 60) {
return "#fc7052";
} else if (stat_val < 70) {
return "#fd5f3d";
} else if (stat_val < 80) {
return "#fd4e29";
} else if (stat_val < 90) {
return "#fe3e14";
} else {
return "#ff2d00";
}
}
std::string NodeFontColorForStatistic(const Statistic& statistic) {
if (statistic.stat_val() < 60) {
return "black";
} else {
return "white";
}
}
std::string NodeColorAttributes(ColorScheme color) {
NodeColors node_colors = NodeColorsForScheme(color);
return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
}
std::string HtmlLikeStringSanitize(absl::string_view s) {
  return absl::StrReplaceAll(s,
                             {{"<", "&lt;"}, {">", "&gt;"}, {"\"", "&quot;"}});
}
bool IsFusedBroadcastOfConstantEffectiveScalar(const HloInstruction* instr) {
namespace m = match;
return instr->parent()->IsFusionComputation() &&
Match(instr, m::Broadcast(m::ConstantEffectiveScalar()));
}
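// If the computation consists of exactly two effective-scalar parameters and
// a single binary root op on them, returns a short human-readable name for
// the op (e.g. "add", "less-than"); otherwise returns nullopt. When the
// operands appear swapped, non-commutative comparison directions are
// rejected.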
optional<std::string> MatchTrivialComputation(
const HloComputation* computation) {
namespace m = match;
if (computation->instruction_count() != 3) {
return nullopt;
}
HloInstruction* root = computation->root_instruction();
const HloInstruction *param0, *param1;
if (!Match(root, m::Op()
.WithNumOperands(2)
.WithShape(m::Shape().IsEffectiveScalar())
.WithBinaryOperandsAnyOrder(
m::Parameter(¶m0, 0)
.WithShape(m::Shape().IsEffectiveScalar()),
m::Parameter(¶m1, 1)
.WithShape(m::Shape().IsEffectiveScalar())))) {
return nullopt;
}
if (root->operand(0) == param1) {
CHECK_EQ(root->operand(1), param0);
    if (root->opcode() == HloOpcode::kCompare) {
switch (root->comparison_direction()) {
case ComparisonDirection::kLe:
case ComparisonDirection::kGe:
case ComparisonDirection::kGt:
case ComparisonDirection::kLt:
return nullopt;
default:
break;
}
}
}
switch (root->opcode()) {
case HloOpcode::kAdd:
return "add";
case HloOpcode::kMultiply:
return "multiply";
case HloOpcode::kMinimum:
return "min";
case HloOpcode::kMaximum:
return "max";
case HloOpcode::kXor:
return "xor";
case HloOpcode::kAnd:
return "and";
case HloOpcode::kOr:
return "or";
case HloOpcode::kCompare: {
switch (root->comparison_direction()) {
case ComparisonDirection::kLe:
return "less-or-equal";
case ComparisonDirection::kGe:
return "greater-or-equal";
case ComparisonDirection::kGt:
return "greater-than";
case ComparisonDirection::kLt:
return "less-than";
case ComparisonDirection::kEq:
return "equal-to";
case ComparisonDirection::kNe:
return "not-equal-to";
}
}
default:
return nullopt;
}
}
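// Generates Graphviz DOT for a computation. Node, cluster, and edge ids are
// assigned while dumping the body, which is why Dump() renders the body
// before prepending the header: Header() references the collected edge ids
// when emitting its CSS hover rules.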
class HloDotDumper {
public:
HloDotDumper(
const HloComputation* computation, absl::string_view label,
const DebugOptions& debug_options, HloRenderOptions hlo_render_options,
NodeFilter filter,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map = std::nullopt)
: computation_(computation),
label_(label),
debug_options_(debug_options),
hlo_render_options_(hlo_render_options),
filter_(std::move(filter)),
color_map_(color_map) {}
std::string Dump();
std::optional<std::string> CssIdForInstruction(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
auto it = cluster_ids_.find(instr.called_computations()[0]);
if (it == cluster_ids_.end()) {
return std::nullopt;
}
return StrCat("#a_clust", it->second, " path");
}
auto it = node_ids_.find(&instr);
if (it == node_ids_.end()) {
return std::nullopt;
}
return StrCat("#node", it->second, " polygon");
}
private:
std::string InstructionId(const HloInstruction* instruction) {
return StrCat(reinterpret_cast<uint64_t>(instruction));
}
std::string SubcomputationId(const HloComputation* computation) {
return StrCat("cluster_", reinterpret_cast<uint64_t>(computation));
}
std::string Header();
std::string Footer();
bool ShouldShowSubcomputation(const HloComputation* subcomp);
bool ShouldShowFusionSubcomputation(const HloInstruction* instr);
bool ShouldMergeIntoUsers(const HloInstruction* instr) const;
std::string DumpSubcomputation(const HloComputation* subcomp,
const HloInstruction* parent_instr);
std::string DumpComputation(const HloComputation* comp);
std::string DumpRootTag();
std::string DumpInstruction(const HloInstruction* instr);
ColorScheme GetInstructionColor(const HloInstruction* instr);
std::string GetInstructionNodeShape(const HloInstruction* instr);
std::string GetInstructionNodeLabel(const HloInstruction* instr);
std::string GetInstructionNodeMetadata(const HloInstruction* instr);
std::string GetInstructionNodeBackendConfig(const HloInstruction* instr);
std::string GetInstructionNodeExtraInfo(const HloInstruction* instr);
std::string GetInstructionNodeInlinedOperands(const HloInstruction* instr);
void AddInstructionIncomingEdges(const HloInstruction* instr);
const HloInstruction* GetNodeForEdge(const HloInstruction* instr);
std::string GetInstructionTrivialComputationStr(const HloInstruction* instr);
const HloComputation* computation_;
const std::string label_;
const DebugOptions& debug_options_;
const HloRenderOptions hlo_render_options_;
const NodeFilter filter_;
const std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map_;
int64_t next_node_id_ = 1;
absl::flat_hash_map<const HloInstruction*, int64_t> node_ids_;
int64_t root_node_id_;
int64_t next_edge_id_ = 1;
std::unordered_multimap<
std::pair<const HloInstruction*, const HloInstruction*>, int64_t,
absl::Hash<std::pair<const HloInstruction*, const HloInstruction*>>>
edge_ids_;
int64_t next_cluster_id_ = 1;
absl::flat_hash_map<const HloComputation*, int64_t> cluster_ids_;
std::vector<std::string> edges_;
absl::flat_hash_map<HloSharding, ColorScheme> sharding_colors_;
int64_t next_shard_color_ = 0;
};
std::string HloDotDumper::Dump() {
std::string body;
StrAppend(&body, DumpComputation(computation_));
StrAppend(&body, DumpRootTag());
std::string g = Header();
StrAppend(&g, body);
StrAppend(&g, Footer());
return g;
}
std::string HloDotDumper::Header() {
constexpr char fmt[] = R"(digraph G {
rankdir = TB;
compound = true;
label = <<b>%s</b>>;
labelloc = t;
tooltip = " ";
stylesheet=<
data:text/css,
@import url(https:
svg text {
font-family: 'Roboto';
font-size: 12px;
}
%s
>
)";
VLOG(3) << "Generating Header";
std::string graph_label =
StrCat(label_, "<br/>Computation ", computation_->name());
if (computation_->IsFusionComputation()) {
StrAppend(&graph_label, " (in fusion instruction ",
computation_->FusionInstruction()->name(), ")");
}
std::vector<std::string> edge_css_rules;
std::string kBlue = "#1976d2";
std::string kRed = "#d32f2f";
for (const auto& kv : edge_ids_) {
const HloInstruction* from_node = kv.first.first;
const HloInstruction* to_node = kv.first.second;
int64_t edge_id = kv.second;
auto add_hover_css_rule = [&](std::string elem_type, int64_t elem_id,
std::string color) {
edge_css_rules.push_back(
StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n"
" #%s%d:hover ~ #edge%d path { "
"stroke: %s; stroke-width: .2em; }\n"
" #%s%d:hover ~ #edge%d polygon { "
"fill: %s; stroke: %s; stroke-width: .2em; }\n",
elem_type, elem_id, edge_id, color,
elem_type, elem_id, edge_id, color,
elem_type, elem_id, edge_id, color, color));
};
int64_t from_node_id = tsl::gtl::FindWithDefault(node_ids_, from_node, -1);
if (from_node_id == -1) {
LOG(FATAL) << from_node->name() << " was added to edges but not to nodes";
}
int64_t to_node_id = to_node
? tsl::gtl::FindWithDefault(node_ids_, to_node, -1)
: root_node_id_;
if (to_node != nullptr && to_node_id == -1) {
LOG(FATAL) << to_node->name() << " was added to edges but not to nodes";
}
add_hover_css_rule("node", from_node_id, kBlue);
add_hover_css_rule("node", to_node_id, kRed);
if (to_node) {
VLOG(3) << "Adding css for edge " << edge_id << " from node "
<< from_node->name() << " to node " << to_node->name();
} else {
VLOG(3) << "Adding css for edge " << edge_id << " from node "
<< from_node->name() << " to root tag";
}
if (to_node) {
if (from_node->IsFused() &&
from_node->parent()->root_instruction() == from_node) {
int64_t cluster_id = cluster_ids_.at(from_node->parent());
add_hover_css_rule("clust", cluster_id, kBlue);
}
if (to_node->IsFused() && to_node->opcode() == HloOpcode::kParameter) {
int64_t cluster_id = cluster_ids_.at(to_node->parent());
add_hover_css_rule("clust", cluster_id, kRed);
}
}
}
return StrFormat(
fmt, graph_label,
absl::StrReplaceAll(StrJoin(edge_css_rules, "\n"), {{"#", "%23"}}));
}
std::string HloDotDumper::Footer() {
return StrCat(StrJoin(edges_, "\n"), "\n}");
}
bool HloDotDumper::ShouldShowFusionSubcomputation(const HloInstruction* instr) {
CHECK_EQ(instr->opcode(), HloOpcode::kFusion);
return ShouldShowSubcomputation(instr->fused_instructions_computation());
}
bool HloDotDumper::ShouldShowSubcomputation(const HloComputation* subcomp) {
if (subcomp->IsFusionComputation()) {
const HloInstruction* fusion = subcomp->FusionInstruction();
if (!filter_.Show(fusion) || filter_.SomeOrAllOperandsOmitted(fusion) ||
!hlo_render_options_.show_fusion_subcomputations) {
return false;
}
}
if (!subcomp->IsFusionComputation() && MatchTrivialComputation(subcomp)) {
return false;
}
if (subcomp->WhileCallInstruction() != nullptr &&
!hlo_render_options_.show_while_subcomputations) {
return false;
}
return absl::c_any_of(
subcomp->instructions(),
[&](const HloInstruction* instr) { return filter_.Show(instr); });
}
std::string HloDotDumper::DumpSubcomputation(
const HloComputation* subcomp, const HloInstruction* parent_instr) {
VLOG(2) << "Dumping subcomputation " << subcomp->name();
if (parent_instr->opcode() != HloOpcode::kFusion) {
const HloInstruction* from = GetNodeForEdge(subcomp->root_instruction());
VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, parent_instr}, next_edge_id_++});
constexpr char edge_fmt[] =
R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)";
edges_.push_back(StrFormat(
edge_fmt, InstructionId(from), InstructionId(parent_instr),
SubcomputationId(subcomp), subcomp->name(), parent_instr->name()));
}
if (cluster_ids_.find(subcomp) != cluster_ids_.end()) {
return "";
}
cluster_ids_[subcomp] = next_cluster_id_++;
std::string id = SubcomputationId(subcomp);
std::string subcomp_label, style;
if (parent_instr->opcode() == HloOpcode::kFusion) {
subcomp_label =
StrFormat("Fused expression for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(parent_instr->ToCategory()));
std::string extra_info = GetInstructionNodeExtraInfo(parent_instr);
if (!extra_info.empty()) {
StrAppend(&subcomp_label, "<br/>", extra_info);
}
std::string node_backend_config =
GetInstructionNodeBackendConfig(parent_instr);
if (!node_backend_config.empty()) {
StrAppend(&subcomp_label, "<br/>", node_backend_config);
}
bool highlight = filter_.Highlight(parent_instr);
std::string fillcolor;
std::string strokecolor;
if (!highlight && (parent_instr->module_has_statistics() ||
parent_instr->has_statistics())) {
fillcolor = parent_instr->has_statistics()
? NodeFillColorForStatistic(
parent_instr->statistic_to_visualize())
: "#f5f5f5";
strokecolor = "#c2c2c2";
} else if (debug_options_.xla_hlo_graph_sharding_color() && !highlight) {
NodeColors node_colors =
NodeColorsForScheme(GetInstructionColor(parent_instr));
fillcolor = node_colors.fill_color;
strokecolor = node_colors.stroke_color;
} else {
fillcolor = highlight ? "#ffcdd2" : "#f5f5f5";
strokecolor = highlight ? "#b71c1c" : "#c2c2c2";
}
style =
StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
fillcolor, strokecolor);
} else {
subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(subcomp->name()));
style = "style=rounded; color=black;";
}
std::string comp_body = DumpComputation(subcomp);
constexpr char computation_fmt[] = R"(subgraph %s {
%s
label = <%s>;
labelloc = t;
tooltip = " ";
%s
}  // %s
)";
return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id);
}
std::string HloDotDumper::DumpComputation(const HloComputation* comp) {
std::string g;
for (const auto* instr : comp->instructions()) {
if (!filter_.Show(instr)) {
continue;
}
for (const HloComputation* subcomp : instr->called_computations()) {
if (ShouldShowSubcomputation(subcomp)) {
StrAppend(&g, DumpSubcomputation(subcomp, instr));
}
}
StrAppend(&g, DumpInstruction(instr));
}
return g;
}
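// Emits a small circular "ROOT" marker plus an edge from the computation's
// root instruction to it; skipped when the root is filtered out or is an
// (inlined) constant.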
std::string HloDotDumper::DumpRootTag() {
const HloInstruction* from = GetNodeForEdge(computation_->root_instruction());
if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(from)) {
return "";
}
auto from_id = InstructionId(from);
HloInstruction* to = nullptr;
auto to_id = SubcomputationId(computation_);
std::string node_body = "ROOT";
std::string node_shape = "circle";
ColorScheme color = kBrown;
VLOG(2) << "Adding root tag as node " << next_node_id_;
root_node_id_ = next_node_id_++;
VLOG(2) << "Adding edge from " << from->name() << " to root tag as "
<< next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id));
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
"\n",
to_id, node_body, node_shape, NodeColorAttributes(color));
}
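// If instr is a parameter of a fusion computation whose corresponding operand
// on the fusion instruction is a constant, returns that constant; otherwise
// returns nullptr.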
static const HloConstantInstruction* TryGetFusionParameterConstant(
const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kParameter || !instr->IsFused()) {
return nullptr;
}
const HloInstruction* fusion = instr->parent()->FusionInstruction();
const HloInstruction* operand = fusion->operand(instr->parameter_number());
return DynCast<HloConstantInstruction>(operand);
}
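// Nodes that are cheaper to print inline inside their users than to draw as
// separate graph nodes: non-root get-tuple-elements, fused parameters backed
// by constants, and unfused tuple-shaped parameters whose (many) displayed
// users are all get-tuple-elements.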
bool HloDotDumper::ShouldMergeIntoUsers(const HloInstruction* instr) const {
if ((instr->opcode() == HloOpcode::kGetTupleElement &&
instr != instr->parent()->root_instruction()) ||
TryGetFusionParameterConstant(instr) != nullptr) {
return true;
}
const int kMinUsersToOmit = 3;
return instr->opcode() == HloOpcode::kParameter && instr->shape().IsTuple() &&
!instr->IsFused() &&
absl::c_count_if(instr->users(),
[&](const HloInstruction* user) {
return filter_.Show(user);
}) > kMinUsersToOmit &&
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return !filter_.Show(user) ||
user->opcode() == HloOpcode::kGetTupleElement;
});
}
std::string HloDotDumper::DumpInstruction(const HloInstruction* instr) {
if ((instr->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(instr)) &&
instr != instr->parent()->root_instruction()) {
return "";
}
if (ShouldMergeIntoUsers(instr)) {
return "";
}
if (instr->opcode() == HloOpcode::kFusion &&
ShouldShowFusionSubcomputation(instr)) {
return "";
}
VLOG(2) << "Adding node " << instr->name() << " as " << next_node_id_;
node_ids_[instr] = next_node_id_++;
std::string node_shape = GetInstructionNodeShape(instr);
std::string node_label = GetInstructionNodeLabel(instr);
std::string node_metadata = GetInstructionNodeMetadata(instr);
std::string node_backend_config = GetInstructionNodeBackendConfig(instr);
std::string extra_info = GetInstructionNodeExtraInfo(instr);
std::string inlined_constants = GetInstructionNodeInlinedOperands(instr);
std::string trivial_subcomputation =
GetInstructionTrivialComputationStr(instr);
AddInstructionIncomingEdges(instr);
NodeColors node_colors;
std::string node_style;
std::string node_attributes;
if (hlo_render_options_.override_node_colors && color_map_.has_value()) {
if (color_map_->contains(instr)) {
node_colors.fill_color = color_map_->at(instr).color;
node_attributes = color_map_->at(instr).stats;
} else {
      VLOG(2) << "color_map_ for instruction: " << instr->name()
              << " is empty";
node_colors.fill_color = "#808080";
}
node_colors.style = "filled";
node_colors.font_color = "black";
node_colors.stroke_color = "#c2c2c2";
node_style =
StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
} else {
ColorScheme color = GetInstructionColor(instr);
if (!debug_options_.xla_hlo_graph_sharding_color()) {
if (filter_.Deemphasized(instr)) {
color = kDashedBorder;
}
if (filter_.Highlight(instr)) {
node_shape = "diamond";
color = kDarkRed;
}
}
node_colors = NodeColorsForScheme(color);
if (instr->has_statistics()) {
const auto& statistic_to_visualize = instr->statistic_to_visualize();
node_colors.fill_color =
NodeFillColorForStatistic(statistic_to_visualize);
node_colors.stroke_color = "#c2c2c2";
node_colors.font_color =
NodeFontColorForStatistic(statistic_to_visualize);
} else if (instr->module_has_statistics()) {
node_colors.fill_color = "#f5f5f5";
node_colors.stroke_color = "#c2c2c2";
node_colors.font_color = "black";
}
node_style =
StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
}
std::string node_body = node_label;
for (const std::string& s :
{trivial_subcomputation, extra_info, inlined_constants,
node_backend_config, node_attributes}) {
if (!s.empty()) {
StrAppend(&node_body, "<br/>", s);
}
}
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
"\n",
InstructionId(instr), node_body, node_shape, node_metadata,
node_style);
}
std::string HloDotDumper::GetInstructionNodeInlinedOperands(
const HloInstruction* instr) {
auto stringify_constant = [](const HloConstantInstruction* constant,
const Shape& shape) {
if (ShapeUtil::IsZeroElementArray(shape)) {
return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape()));
}
optional<int64_t> elem_count;
if (shape.IsArray()) {
elem_count = ShapeUtil::ElementsIn(constant->shape());
}
if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) {
std::string literal_str = constant->literal().ToStringWithoutShape();
if (literal_str.size() <= 64) {
return StrFormat("%s %s", shape.ToString(), literal_str);
}
}
std::string constant_name;
if (absl::StartsWith(constant->name(), "constant")) {
constant_name = std::string(constant->name());
} else {
constant_name = StrCat("constant ", constant->name());
}
return StrFormat("%s %s", constant_name, ShapeUtil::HumanString(shape));
};
std::vector<std::string> lines;
constexpr int64_t kMaxOperandsShown = 32;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
const HloInstruction* operand = instr->operand(i);
optional<std::string> operand_str;
if (const auto* constant_operand =
DynCast<HloConstantInstruction>(operand)) {
operand_str =
stringify_constant(constant_operand, constant_operand->shape());
} else if (IsFusedBroadcastOfConstantEffectiveScalar(operand)) {
operand_str = stringify_constant(
Cast<HloConstantInstruction>(operand->operand(0)), operand->shape());
} else if (ShouldMergeIntoUsers(operand)) {
if (operand->opcode() == HloOpcode::kParameter) {
if (const HloConstantInstruction* constant =
TryGetFusionParameterConstant(operand)) {
operand_str = stringify_constant(constant, constant->shape());
} else {
operand_str = StrFormat("Parameter %d", operand->parameter_number());
}
} else if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand_str =
StrFormat("tuple-element %d of %s %s", operand->tuple_index(),
operand->operand(0)->name(),
ShapeUtil::HumanStringWithLayout(operand->shape()));
} else {
operand_str = std::string(operand->name());
}
}
if (operand_str) {
if (instr->operand_count() > 1) {
lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str));
} else {
lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str));
}
}
if (lines.size() == kMaxOperandsShown && i < instr->operand_count() - 1) {
lines.push_back("...");
break;
}
}
if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) {
const HloInstruction* param_input =
instr->parent()->FusionInstruction()->operand(
instr->parameter_number());
if (param_input->opcode() == HloOpcode::kGetTupleElement) {
lines.push_back(
StrFormat("tuple-element %d of %s %s", param_input->tuple_index(),
param_input->operand(0)->name(),
ShapeUtil::HumanStringWithLayout(param_input->shape())));
}
}
return StrJoin(lines, "<br/>");
}
ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
if (debug_options_.xla_hlo_graph_sharding_color()) {
if (!instr->has_sharding()) {
return kDashedBorder;
}
auto it = sharding_colors_.find(instr->sharding());
if (it != sharding_colors_.end()) {
return it->second;
}
ColorScheme color = static_cast<ColorScheme>(
kBlue + (next_shard_color_++ % (kDashedBorder - kBlue)));
sharding_colors_.emplace(instr->sharding(), color);
return color;
}
auto parameter_color = IsSmall(instr) ? kOrange : kDarkOrange;
if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kParameter &&
ShouldMergeIntoUsers(operand) &&
TryGetFusionParameterConstant(operand) == nullptr;
})) {
return parameter_color;
}
switch (instr->opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConvert:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kPopulationCount:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRemainder:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTopK:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
return kWhite;
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kGetTupleElement:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kPad:
case HloOpcode::kTuple:
return kWhite;
case HloOpcode::kConstant:
return kWhite;
case HloOpcode::kBroadcast:
case HloOpcode::kDynamicUpdateSlice:
return kYellow;
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kReverse:
case HloOpcode::kTranspose:
return kGreen;
case HloOpcode::kCopy:
case HloOpcode::kCopyStart:
case HloOpcode::kCopyDone:
return kGreen;
case HloOpcode::kBitcast:
if (!instr->IsFused()) {
return kWhite;
}
return kGreen;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
return GetInstructionColor(instr->async_wrapped_instruction());
case HloOpcode::kConvolution:
case HloOpcode::kDot:
case HloOpcode::kFft:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
return kDarkBlue;
case HloOpcode::kParameter:
return parameter_color;
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kGather:
return kPurple;
case HloOpcode::kDomain:
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
return kGray;
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kPartitionId:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kReplicaId:
return kBrown;
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kWhile:
return kDarkGreen;
}
}
std::string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kWhile:
return "ellipse";
default:
return "rect";
}
}
std::string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kParameter) {
return StrFormat("<b>Parameter %d</b>", instr->parameter_number());
}
if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) {
return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
}
std::string extended_opcode =
StrCat(HloOpcodeString(instr->opcode()),
instr->opcode() != HloOpcode::kFusion
? ""
: StrCat(":", xla::ToString(instr->fusion_kind())));
return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(instr->name()),
HtmlLikeStringSanitize(extended_opcode));
}
std::string HloDotDumper::GetInstructionNodeMetadata(
const HloInstruction* instr) {
std::vector<std::string> lines;
if (!instr->metadata().op_name().empty()) {
lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name()));
}
if (!instr->metadata().op_type().empty()) {
lines.push_back(StrFormat(
"op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type())));
}
if (!instr->metadata().source_file().empty() &&
instr->metadata().source_line() != 0) {
lines.push_back(StrFormat("source: %s:%d", instr->metadata().source_file(),
instr->metadata().source_line()));
}
if (instr->metadata().stack_frame_id() != 0) {
auto hlo_module = instr->parent()->parent();
int frame_id = instr->metadata().stack_frame_id();
while (frame_id != 0) {
HloModule::StackFrame frame = hlo_module->get_stack_frame(frame_id);
if (frame.empty()) {
break;
}
frame_id = frame.parent_frame_id;
lines.push_back(StrFormat(
"%s:%s:%d%s", frame.file_name, frame.function_name, frame.line,
frame.column == 0 ? "" : StrFormat(":%d", frame.column)));
}
}
return StrJoin(lines, "\n");
}
static std::vector<std::pair<std::string, std::string>>
ExtractCudnnConvBackendConfigProps(const gpu::CudnnConvBackendConfig& config) {
std::vector<std::pair<std::string, std::string>> props;
if (config.conv_result_scale() != 1) {
props.emplace_back("conv_result_scale", StrCat(config.conv_result_scale()));
}
if (config.side_input_scale() != 0 && config.side_input_scale() != 1) {
props.emplace_back("side_input_scale", StrCat(config.side_input_scale()));
}
if (config.activation_mode() == se::dnn::ActivationMode::kLeakyRelu) {
props.emplace_back("leakyrelu_alpha", StrCat(config.leakyrelu_alpha()));
}
props.emplace_back(
"activation_mode",
se::dnn::ActivationModeString(
static_cast<se::dnn::ActivationMode>(config.activation_mode())));
props.emplace_back("algo",
se::dnn::AlgorithmDesc(config.algorithm()).ToString());
return props;
}
static std::vector<std::pair<std::string, std::string>>
ExtractGemmBackendConfigProps(const gpu::GemmBackendConfig& config,
const HloInstruction* instr) {
std::vector<std::pair<std::string, std::string>> props;
if (primitive_util::IsComplexType(instr->shape().element_type())) {
if (config.alpha_real() != 1 || config.alpha_imag() != 1) {
props.emplace_back("alpha_real", StrCat(config.alpha_real()));
      props.emplace_back("alpha_imag", StrCat(config.alpha_imag()));
}
} else {
if (config.alpha_real() != 1) {
props.emplace_back("alpha", StrCat(config.alpha_real()));
}
}
if (config.beta() != 0 && config.beta() != 1) {
props.emplace_back("beta", StrCat(config.beta()));
}
props.emplace_back(
"", absl::StrReplaceAll(
DotDimensionNumbersToString(config.dot_dimension_numbers()),
{{", ", "<br/>"}}));
if (config.algorithm_case() == gpu::GemmBackendConfig::kSelectedAlgorithm) {
props.emplace_back("algorithm", StrCat(config.selected_algorithm()));
}
if (config.epilogue() != gpu::GemmBackendConfig::DEFAULT) {
props.emplace_back(
"epilogue", gpu::GemmBackendConfig::Epilogue_Name(config.epilogue()));
}
return props;
}
std::string HloDotDumper::GetInstructionNodeBackendConfig(
const HloInstruction* instr) {
std::vector<std::pair<std::string, std::string>> props;
if (gpu::IsCustomCallToDnnConvolution(*instr)) {
absl::StatusOr<gpu::GpuBackendConfig> config =
instr->backend_config<gpu::GpuBackendConfig>();
if (config.ok()) {
props = ExtractCudnnConvBackendConfigProps(
config->cudnn_conv_backend_config());
}
} else if (gpu::IsCublasGemm(*instr)) {
absl::StatusOr<gpu::GpuBackendConfig> config =
instr->backend_config<gpu::GpuBackendConfig>();
if (config.ok()) {
props =
ExtractGemmBackendConfigProps(config->gemm_backend_config(), instr);
}
}
if (!props.empty()) {
return StrCat((props.size() > 1 ? "<br/>" : ""),
StrJoin(props, "<br/>",
[](std::string* out,
const std::pair<std::string, std::string>& kv) {
if (!kv.first.empty()) {
return StrAppend(out, kv.first, "=", kv.second);
}
StrAppend(out, kv.second);
}));
}
if (!hlo_render_options_.show_backend_config ||
instr->raw_backend_config_string().empty()) {
return "";
}
return StrCat("backend_config=\"", instr->raw_backend_config_string(), "\"");
}
std::string HloDotDumper::GetInstructionNodeExtraInfo(
const HloInstruction* instr) {
std::vector<std::string> lines;
for (const auto& line : instr->ExtraAttributesToString(
HloPrintOptions().set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff))) {
constexpr int kMaxDeviceIdFieldLen = 128;
if ((absl::StartsWith(line, "replica_groups=") ||
absl::StartsWith(line, "source_target_pairs=") ||
absl::StartsWith(line, "control-predecessors=")) &&
line.length() > kMaxDeviceIdFieldLen) {
lines.push_back(HtmlLikeStringSanitize(
StrCat(line.substr(0, kMaxDeviceIdFieldLen - 3), "...")));
} else if (absl::StartsWith(line, "feature_group_count=")) {
lines.push_back(StrFormat("<b>%s</b>", HtmlLikeStringSanitize(line)));
} else {
lines.push_back(HtmlLikeStringSanitize(line));
}
}
if (instr->opcode() != HloOpcode::kFusion ||
!ShouldShowFusionSubcomputation(instr)) {
bool shape_is_multidim = false;
ShapeUtil::ForEachSubshape(instr->shape(),
[&](const Shape& s, const ShapeIndex&) {
shape_is_multidim |= s.dimensions_size() > 1;
});
std::string instr_shape;
if (instr->opcode() != HloOpcode::kTuple && shape_is_multidim) {
instr_shape = ShapeUtil::HumanStringWithLayout(instr->shape());
} else {
instr_shape = ShapeUtil::HumanString(instr->shape());
}
constexpr int kMaxShapeLen = 64;
if (instr_shape.length() > kMaxShapeLen) {
instr_shape = StrCat(
absl::string_view(instr_shape).substr(0, kMaxShapeLen - 3), "...");
}
lines.push_back(HtmlLikeStringSanitize(instr_shape));
}
if (debug_options_.xla_hlo_graph_addresses()) {
lines.push_back(StrFormat("[%p]", instr));
}
return StrJoin(lines, "<br/>");
}
void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
constexpr int kMaxEdgesBetweenTwoNodes = 64;
auto add_edge = [&](const HloInstruction* from, const HloInstruction* to,
int64_t operand_num, bool control_edge = false) {
if (edge_ids_.count({from, to}) > kMaxEdgesBetweenTwoNodes) {
return;
}
from = GetNodeForEdge(from);
if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(from) ||
ShouldMergeIntoUsers(from)) {
return;
}
VLOG(2) << "Adding edge from " << from->name() << " to " << to->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
std::string edge_label;
if (control_edge) {
edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\"";
} else if (instr->operand_count() > 1) {
edge_label =
StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num);
}
constexpr char kEdgeFmt[] =
R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to),
(IsSmall(from) ? "empty" : "normal"),
from->name(), to->name(), edge_label));
};
if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) {
if (instr->parent() != computation_) {
const HloInstruction* fusion = instr->parent()->FusionInstruction();
      add_edge(fusion->operand(instr->parameter_number()), instr,
               /*operand_num=*/0);
}
} else {
for (int64_t i = 0; i < instr->operand_count(); ++i) {
add_edge(instr->operand(i), instr, i);
}
for (const HloInstruction* pred : instr->control_predecessors()) {
add_edge(pred, instr, 0, true);
}
}
}
std::string HloDotDumper::GetInstructionTrivialComputationStr(
const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kFusion) {
return "";
}
std::vector<std::string> lines;
for (int64_t i = 0; i < instr->called_computations().size(); ++i) {
optional<std::string> computation_type =
MatchTrivialComputation(instr->called_computations()[i]);
if (!computation_type) {
continue;
}
if (instr->called_computations().size() == 1) {
lines.push_back(StrFormat("Subcomputation: <b>%s</b>",
HtmlLikeStringSanitize(*computation_type)));
} else {
lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i,
HtmlLikeStringSanitize(*computation_type)));
}
}
return StrJoin(lines, "<br/>");
}
const HloInstruction* HloDotDumper::GetNodeForEdge(
const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kGetTupleElement) {
instr = instr->operand(0);
}
while (instr->opcode() == HloOpcode::kFusion &&
ShouldShowFusionSubcomputation(instr)) {
instr = instr->fused_expression_root();
}
return instr;
}
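// Builds a NodeFilter covering everything within `radius` hops of `root` via
// breadth-first search. Bitcasts do not count toward the distance, traversal
// stops at `boundary` nodes, users of constants are not followed, and a node
// with more than 16 users is marked kSomeUsersOmitted instead of expanded.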
NodeFilter MakeNodeRadiusAroundFilter(
const HloInstruction* root, int64_t radius,
const absl::flat_hash_set<const HloInstruction*>& boundary) {
absl::flat_hash_map<const HloInstruction*, NodeFilterResult> nodes;
std::deque<std::pair<const HloInstruction*, int64_t>> worklist;
worklist.push_back({root, 0});
while (!worklist.empty()) {
const HloInstruction* instr;
int64_t depth;
std::tie(instr, depth) = worklist.front();
worklist.pop_front();
nodes[instr] = kNormalNode;
if (depth == radius) {
continue;
}
if (boundary.contains(instr)) {
continue;
}
if (instr == root || instr->opcode() != HloOpcode::kTuple) {
for (const HloInstruction* operand : instr->operands()) {
if (!nodes.contains(operand)) {
int new_depth = (operand->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kBitcast)
? depth
: depth + 1;
worklist.push_back({operand, new_depth});
}
}
}
for (const HloComputation* computation : instr->called_computations()) {
worklist.push_back({computation->root_instruction(), depth + 1});
}
if (instr->opcode() == HloOpcode::kConstant) {
continue;
}
constexpr int kMaxUsersToRender = 16;
if (instr->user_count() > kMaxUsersToRender) {
nodes[instr] = kSomeUsersOmitted;
continue;
}
for (const HloInstruction* user : instr->users()) {
if (!nodes.contains(user)) {
worklist.push_back({user, depth + 1});
}
}
}
auto is_displayed = [&](const HloInstruction* instr) {
return nodes.contains(instr) || instr->opcode() == HloOpcode::kConstant ||
instr->parent() != root->parent();
};
for (auto& kv : nodes) {
const HloInstruction* instr = kv.first;
NodeFilterResult& filter_result = kv.second;
const auto& operands = instr->operands();
if (absl::c_any_of(operands, is_displayed) &&
!absl::c_all_of(operands, is_displayed)) {
filter_result = kSomeOperandsOmitted;
} else if (!operands.empty() && absl::c_none_of(operands, is_displayed)) {
filter_result = kOmitNodeOperands;
}
if (filter_result == kSomeUsersOmitted &&
absl::c_all_of(instr->users(), is_displayed)) {
filter_result = kNormalNode;
}
}
nodes[root] = kHighlightNode;
return NodeFilter(
[=](const HloInstruction* instr) {
auto it = nodes.find(instr);
if (it != nodes.end()) {
return it->second;
}
if (instr->parent() != root->parent()) {
return kNormalNode;
}
return kHideNode;
},
nodes.size());
}
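// Builds a NodeFilter containing the nodes that lie on user-edge paths from
// `from` to `to`, discovered by BFS. If the max_nodes budget truncates the
// set, *hit_limit is set to true.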
NodeFilter MakeNodeFromToFilter(const HloInstruction* from,
const HloInstruction* to, int64_t max_nodes,
bool* hit_limit) {
*hit_limit = false;
std::deque<std::vector<const HloInstruction*>> queue;
queue.push_front({from});
absl::flat_hash_set<const HloInstruction*> visited;
absl::flat_hash_set<const HloInstruction*> to_display = {from, to};
while (!queue.empty() && to_display.size() < max_nodes) {
std::vector<const HloInstruction*> path = std::move(queue.front());
queue.pop_front();
if (!visited.insert(path.back()).second) {
continue;
}
for (const auto* user : path.back()->users()) {
if (user == to) {
auto it = path.begin();
for (; it != path.end() && to_display.size() < max_nodes; ++it) {
to_display.insert(*it);
}
if (it != path.end()) {
*hit_limit = true;
}
} else if (!visited.count(user)) {
auto new_path = path;
new_path.push_back(user);
queue.push_back(std::move(new_path));
}
}
}
return NodeFilter([=](const HloInstruction* instr) {
if (instr == from || instr == to) {
return kHighlightNode;
}
return to_display.count(instr) ? kNormalNode : kHideNode;
});
}
absl::Mutex url_renderer_mu(absl::kConstInit);
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer
ABSL_GUARDED_BY(url_renderer_mu) = nullptr;
absl::Mutex fusion_visualizer_state_mu(absl::kConstInit);
namespace {
struct FusionVisualizerProgress {
void AddState(absl::string_view dot, absl::string_view explanation,
std::optional<std::string> to_highlight) {
if (dot_graphs.empty() || dot_graphs.back() != dot) {
dot_graphs.push_back(std::string(dot));
}
frames.push_back({static_cast<int>(dot_graphs.size() - 1),
std::string(explanation), to_highlight.value_or("")});
}
std::vector<std::string> dot_graphs;
struct FusionFrame {
int dot_graph;
std::string label;
std::string to_highlight;
};
std::vector<FusionFrame> frames;
};
}
static auto& fusion_visualizer_states
TF_GUARDED_BY(fusion_visualizer_state_mu) = *new absl::flat_hash_map<
std::pair<int64_t, int64_t>, FusionVisualizerProgress>();
static std::pair<int, int> FusionVisualizerStateKey(
const HloComputation& computation) {
return std::make_pair(computation.parent()->unique_id(),
computation.unique_id());
}
}
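// Gzip-compresses `input` and base64-encodes the result, translating the
// web-safe base64 alphabet emitted by tsl::Base64Encode back to the standard
// one so the JavaScript below can decode it from a base64 data: URL.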
static absl::StatusOr<std::string> CompressAndEncode(absl::string_view input) {
class WritableStringFile : public tsl::WritableFile {
public:
explicit WritableStringFile(std::string* data) : data_(data){};
~WritableStringFile() override = default;
absl::Status Append(absl::string_view data) override {
absl::StrAppend(data_, data);
return absl::OkStatus();
}
absl::Status Close() override { return absl::OkStatus(); }
absl::Status Flush() override { return absl::OkStatus(); }
absl::Status Sync() override { return absl::OkStatus(); }
private:
std::string* data_;
};
std::string compressed;
WritableStringFile f(&compressed);
auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
tsl::io::ZlibOutputBuffer gz_file(&f, gz_opts.input_buffer_size,
gz_opts.output_buffer_size, gz_opts);
TF_RETURN_IF_ERROR(gz_file.Init());
TF_RETURN_IF_ERROR(gz_file.Append(input));
TF_RETURN_IF_ERROR(gz_file.Close());
std::string encoded;
TF_RETURN_IF_ERROR(tsl::Base64Encode(compressed, &encoded));
return absl::StrReplaceAll(encoded, {{"_", "/"}, {"-", "+"}});
}
static std::string EscapeJSONString(absl::string_view raw) {
return absl::StrCat(
"\"",
absl::StrReplaceAll(raw, {{"\n", "\\n"}, {"\"", "\\\""}, {"\\", "\\\\"}}),
"\"");
}
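// Renders the recorded fusion states as a self-contained HTML page: the DOT
// graphs are gzip+base64-encoded into $DOTS, the frame metadata into $FRAMES,
// and the inline JavaScript lays out frames on demand with hpcc-js/wasm
// graphviz, with j/k keyboard navigation between frames.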
absl::StatusOr<std::string> WrapFusionExplorer(
const FusionVisualizerProgress& visualizer_progress,
absl::string_view graph_title) {
if (visualizer_progress.frames.empty()) {
return Internal("Empty");
}
std::string dot_graphs =
StrFormat("[%s]", StrJoin(visualizer_progress.dot_graphs, ", ",
[&](std::string* out, const std::string& dot) {
StrAppend(out, EscapeJSONString(dot));
}));
std::string frames = StrJoin(
visualizer_progress.frames, ", ", [&](std::string* out, const auto& p) {
StrAppend(out, StrFormat("[%d, %s, %s]", p.dot_graph,
EscapeJSONString(p.label),
EscapeJSONString(p.to_highlight)));
});
TF_ASSIGN_OR_RETURN(std::string dot_graphs_compressed,
CompressAndEncode(dot_graphs));
return absl::StrReplaceAll(
R"wrapper(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html, body {height: 100%; text-align: center;}
#rendered {height: 70%; width: 80%; border:1px solid black; margin: auto; }
#label {width: 80%; margin: auto;}
#performance_note { font-size: small; color: gray; }
#frames_list {
list-style: none; text-align: left; height: 20%; overflow: scroll;
}
#frames_list li { padding: 0.2em; margin: 0.2em; }
.selected { background-color: #e0e0e0; }
.selected a { color: black; text-decoration: none; }
#rendered svg { height: 100% !important; width: 100% !important; }
</style>
</head>
<body>
  <script src="https://www.gstatic.com/external_hosted/hpcc_js_wasm/index.min.js"
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
  <script src="https://www.gstatic.com/external_hosted/svg_pan_zoom/svg-pan-zoom.js">
</script>
<title>Fusion Explorer: $TITLE</title>
<div id='rendered'><center>Loading...</center></div>
<ul id='frames_list'></ul>
<p>Use j/k for keyboard navigation.</p>
<p id='performance_note'>Loading data...</p>
<script>
<!--
const renderCache = {};
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const getIdFromHash = () => {
let hash = window.location.hash;
if (hash.indexOf('frame') == -1) {
return 0;
}
return parseInt(window.location.hash.substring('#frame'.length, window.location.hash.length));
}
const renderCurrentFrame = () => {
if (!window.loaded) { return; }
const frames_list = document.getElementById('frames_list');
const currId = getIdFromHash();
for (let selected of frames_list.getElementsByClassName('selected')) {
selected.classList.remove('selected');
}
const selected = frames_list.children[currId];
selected.classList.add('selected');
selected.scrollIntoView();
const frame = frames[currId];
const dot_ptr = frame[0];
let dot_txt = window.dots[dot_ptr];
const label = frame[1];
document.getElementById('performance_note').innerText = "Rendering...";
const results = cssregex.exec(dot_txt)
let css_data = ''
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_txt = dot_txt.replace(cssregex, '');
}
let render_start = performance.now();
const render_callback = svg => {
renderCache[dot_ptr] = svg;
var area = document.getElementById('rendered');
area.innerHTML = `${svg}<style>${css_data}</style>`;
var panzoom = svgPanZoom(area.children[0], {
zoomEnabled: true, controlIconsEnabled: true, maxZoom: 200, });
var to_highlight = frame[2].length ?
document.querySelector(`${frame[2]}`) : null;
if (to_highlight) {
to_highlight.style.setProperty('fill', 'red');
}
document.getElementById('performance_note').innerText =
`Rendering took ${(performance.now() - render_start).toFixed(2)}ms`;
let text_nodes = document.getElementsByTagName("text");
for (var el of text_nodes) {
if (title_to_id.has(el.innerHTML)) {
el.style.cursor = "pointer";
}
}
};
if (renderCache[dot_ptr]) {
render_callback(renderCache[dot_ptr]);
} else {
hpccWasm.graphviz.layout(dot_txt, "svg", "dot").then(render_callback);
}
};
const update = (delta) => {
let currId = getIdFromHash();
currId = (currId + delta + frames.length) % frames.length;
window.location.hash = `#frame${currId}`
};
const renderFrameList = () => {
const currId = getIdFromHash();
const frames_list = document.getElementById('frames_list');
for (let i=0; i<frames.length; i++) {
const f = frames[i];
let frame_descr = f[1];
const rendered = document.createElement("li");
if (frame_descr == "") {
frame_descr = "Unnamed state";
}
rendered.innerHTML = `<a href="#frame${i}">${frame_descr}</a>`;
if (i == currId) {
rendered.classList.add('selected');
}
frames_list.appendChild(rendered);
}
};
const decompress = async function(compressed) {
const ds = new DecompressionStream('gzip');
const in_fetch = await fetch(`data:application/octet-stream;base64,${compressed}`);
const in_blob = await in_fetch.blob();
const out_stream = in_blob.stream().pipeThrough(ds);
const out_blob = await new Response(out_stream).blob();
return await out_blob.text();
}
const dots_compressed = "$DOTS";
const frames = [$FRAMES];
let loaded = false;
window.addEventListener('hashchange', () => {
renderCurrentFrame();
});
window.addEventListener("keydown", (event) => {
if (event.defaultPrevented) {
return;
}
if (event.key == "j") {
update(1);
} else if (event.key == "k") {
update(-1);
} else {
return;
}
event.preventDefault();
}, true);
document.addEventListener("DOMContentLoaded", () => {
decompress(dots_compressed).then(text => {
window.dots = JSON.parse(text);
window.loaded = true;
renderFrameList();
renderCurrentFrame();
});
window.title_to_id = new Map();
for (let i=0; i < frames.length; i++) {
title_to_id.set(frames[i][1], i);
}
document.addEventListener("click", (event) => {
let txt = event.target.innerHTML;
if (title_to_id.has(txt)) {
let id = title_to_id.get(txt);
window.location.hash = `#frame${id}`;
}
});
});
</script>
</body>
</html>
)wrapper",
{{"$DOTS", dot_graphs_compressed},
{"$FRAMES", frames},
{"$TITLE", graph_title}});
}
static std::string GraphTitle(const HloComputation& computation) {
return absl::StrCat(computation.parent()->name(), "_", computation.name());
}
absl::StatusOr<std::string> WrapFusionExplorer(
const HloComputation& computation) {
absl::MutexLock lock(&fusion_visualizer_state_mu);
const FusionVisualizerProgress& visualizer_progress =
fusion_visualizer_states[FusionVisualizerStateKey(computation)];
return WrapFusionExplorer(visualizer_progress, GraphTitle(computation));
}
static absl::StatusOr<std::string> WrapDotInHtml(absl::string_view dot,
absl::string_view title) {
FusionVisualizerProgress progress;
progress.AddState(dot, title, std::nullopt);
return WrapFusionExplorer(progress, title);
}
static absl::StatusOr<std::string> WrapDotInFormat(
const HloComputation& computation, absl::string_view dot,
RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
switch (format) {
case RenderedGraphFormat::kUrl:
CHECK(url_renderer != nullptr)
<< "Should have checked url_renderer != null before calling.";
return (*url_renderer)(dot);
case RenderedGraphFormat::kHtml:
return WrapDotInHtml(dot, GraphTitle(computation));
case RenderedGraphFormat::kDot:
return std::string(dot);
}
}
void RegisterGraphToURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
absl::MutexLock lock(&url_renderer_mu);
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
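// Snapshots the neighborhood (radius 4) around the fusion `consumer` into the
// per-computation visualizer state, remembering a CSS selector for `producer`
// (if given) so the fusion explorer can highlight it.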
void RegisterFusionState(const HloComputation& computation,
absl::string_view label,
const HloInstruction& consumer,
const HloInstruction* producer) {
absl::MutexLock lock(&fusion_visualizer_state_mu);
FusionVisualizerProgress& fusion_progress =
fusion_visualizer_states[FusionVisualizerStateKey(computation)];
static constexpr int kRenderRadius = 4;
absl::flat_hash_set<const HloInstruction*> render_boundary;
for (const HloInstruction* user : consumer.users()) {
render_boundary.insert(user);
}
HloDotDumper dumper(
consumer.parent(),
StrCat("Rendering of ", kRenderRadius, " nodes around fusion consumer"),
consumer.GetModule()->config().debug_options(), {},
MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary));
std::string dot_txt = dumper.Dump();
std::optional<std::string> producer_to_highlight;
if (producer) {
producer_to_highlight = dumper.CssIdForInstruction(*producer);
}
fusion_progress.AddState(dot_txt, label, producer_to_highlight);
}
absl::StatusOr<std::string> RenderGraph(
const HloComputation& computation, absl::string_view label,
const DebugOptions& debug_options, RenderedGraphFormat format,
HloRenderOptions hlo_render_options,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return Unavailable("Can't render as URL; no URL renderer was registered.");
}
std::string rendered_dot =
HloDotDumper(&computation, label, debug_options, hlo_render_options,
NodeFilter(), color_map)
.Dump();
return WrapDotInFormat(computation, rendered_dot, format);
}
absl::StatusOr<std::string> RenderAllComputationsToHtml(
const HloModule& module) {
FusionVisualizerProgress progress;
std::vector<HloInstruction*> instrs =
module.entry_computation()->MakeInstructionPostOrder();
absl::c_reverse(instrs);
for (const HloInstruction* instr : instrs) {
if (absl::c_linear_search(
std::vector<HloOpcode>{HloOpcode::kConstant,
HloOpcode::kGetTupleElement},
instr->opcode())) {
continue;
}
HloRenderOptions opts;
opts.show_fusion_subcomputations = true;
opts.show_backend_config = true;
opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile;
static constexpr int64_t max_nodes_to_render = 100;
absl::flat_hash_set<const HloInstruction*> render_boundary;
NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary);
if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) {
filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary);
}
std::string dot =
HloDotDumper(module.entry_computation(), instr->name(),
module.config().debug_options(), opts, filter)
.Dump();
progress.AddState(dot, instr->name(), std::nullopt);
}
return WrapFusionExplorer(progress, module.name());
}
absl::StatusOr<std::string> RenderNeighborhoodAround(
const HloInstruction& node, int radius, RenderedGraphFormat format,
HloRenderOptions hlo_render_options,
const absl::flat_hash_set<const HloInstruction*>& boundary,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return FailedPrecondition(
"Can't render as URL; no URL renderer was registered.");
}
std::string label =
StrCat("Neighborhood of ", radius, " nodes around ", node.name());
std::string rendered_dot =
HloDotDumper(
node.parent(), label, node.GetModule()->config().debug_options(),
hlo_render_options,
MakeNodeRadiusAroundFilter(&node, radius, boundary), color_map)
.Dump();
return WrapDotInFormat(*node.parent(), rendered_dot, format);
}
absl::StatusOr<std::string> RenderAllPathsFromTo(
const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,
RenderedGraphFormat format, HloRenderOptions hlo_render_options) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return FailedPrecondition(
"Can't render as URL; no URL renderer was registered.");
}
CHECK_EQ(from.parent(), to.parent()) << "Nodes must be in same computation!";
auto debug_options = from.GetModule()->config().debug_options();
bool hit_limit = false;
NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit);
std::string label;
if (!hit_limit) {
label = StrCat("All paths from ", from.name(), " to ", to.name());
} else {
label = StrCat(max_nodes, " nodes on the shortest paths from ", from.name(),
" to ", to.name(),
"<br/><br/>***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN "
"NODES***<br/><br/>");
}
std::string rendered_dot = HloDotDumper(from.parent(), label, debug_options,
hlo_render_options, filter)
.Dump();
return WrapDotInFormat(*from.parent(), rendered_dot, format);
}
} | #include "xla/service/hlo_graph_dumper.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::HasSubstr;
using HloGraphDumperTest = HloTestBase;
std::string TestName() {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
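// Builds a chain of adds, fuses it, then re-fuses part of the fused
// computation, and checks that every instruction from all three computations
// shows up in the rendered DOT, including in a radius-1 neighborhood render.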
TEST_F(HloGraphDumperTest, NestedFusion) {
HloComputation::Builder b("b");
auto shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> params;
for (int i = 0; i <= 4; ++i) {
params.push_back(b.AddInstruction(
HloInstruction::CreateParameter(i, shape, StrCat("param", i))));
}
std::vector<HloInstruction*> sums;
sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, params[0], params[1])));
for (int i = 0; i <= 2; ++i) {
sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, sums[i], params[i + 2])));
}
HloModuleConfig config;
HloModule m(TestName(), config);
m.AddEntryComputation(b.Build());
HloComputation* root_computation = m.entry_computation();
auto* outer_fusion = root_computation->CreateFusionInstruction(
{sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop);
std::vector<HloInstruction*> fused_sums;
for (auto* instr : outer_fusion->fused_instructions_computation()
->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kAdd) {
fused_sums.push_back(instr);
}
}
auto* inner_fusion =
outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
{fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop);
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "", DebugOptions(),
RenderedGraphFormat::kDot));
for (const HloComputation* computation :
{root_computation,
inner_fusion->fused_instructions_computation(),
outer_fusion->fused_instructions_computation()}) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_THAT(graph, HasSubstr(instruction->name()));
}
}
const HloInstruction* inner_sum = nullptr;
for (const HloInstruction* instruction :
inner_fusion->fused_instructions_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kAdd) {
inner_sum = instruction;
break;
}
}
ASSERT_NE(inner_sum, nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph,
RenderNeighborhoodAround(*inner_sum, 1,
RenderedGraphFormat::kDot));
EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name()));
}
TEST_F(HloGraphDumperTest, Constant) {
HloComputation::Builder b("b");
auto instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42)));
instruction->SetAndSanitizeName("i_am_a_constant_root_instruction");
HloModuleConfig config;
HloModule m(TestName(), config);
HloComputation* root_computation = m.AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "an_empty_graph", DebugOptions(),
RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("an_empty_graph"));
}
TEST_F(HloGraphDumperTest, TupleConstant) {
Shape tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})});
HloComputation::Builder b("b");
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape)));
auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeShape(F32, {3, 2}), constant, 0));
HloModuleConfig config;
HloModule m(TestName(), config);
HloComputation* root_computation = m.AddEntryComputation(b.Build(gte));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "tuple_constant", DebugOptions(),
RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("tuple_constant"));
EXPECT_THAT(graph, HasSubstr("constant (f32[3,2], s32[4,5])"));
}
TEST_F(HloGraphDumperTest, Compare) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0)
param.1 = f32[10] parameter(1)
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("direction=LT"));
}
TEST_F(HloGraphDumperTest, HasStatisticsViz) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5}
param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4}
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
}
TEST_F(HloGraphDumperTest, RootIsConstant) {
const char* hlo_string = R"(
HloModule indexed_conditional
%then_branch (empty: ()) -> f32[] {
%empty = () parameter(0)
ROOT %then = f32[] constant(1)
}
%else_branch (empty.1: ()) -> f32[] {
%empty.1 = () parameter(0)
ROOT %else = f32[] constant(2)
}
ENTRY %conditional_select (constant: pred[]) -> (f32[]) {
%constant = pred[] parameter(0)
%emptytuple = () tuple()
%conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch
ROOT %t = (f32[]) tuple(f32[] %conditional)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
}
TEST_F(HloGraphDumperTest, OverrideColors) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0)
param.1 = f32[10] parameter(1)
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<const HloInstruction*, ColorStats> color_map;
ColorStats color_stats_1;
color_stats_1.color = "#A9C343";
color_stats_1.stats = absl::StrFormat("%.3f", 1.11);
ColorStats color_stats_2;
color_stats_2.color = "#BC8A3F";
color_stats_2.stats = absl::StrFormat("%.3f", 2.22);
color_map[module->entry_computation()->GetInstructionWithName("param.0")] =
color_stats_1;
color_map[module->entry_computation()->GetInstructionWithName("param.1")] =
color_stats_2;
HloRenderOptions hlo_render_options;
hlo_render_options.override_node_colors = true;
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options,
color_map));
EXPECT_THAT(graph, HasSubstr("#A9C343"));
EXPECT_THAT(graph, HasSubstr("1.110"));
EXPECT_THAT(graph, HasSubstr("#BC8A3F"));
EXPECT_THAT(graph, HasSubstr("2.220"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10bb58ac-bb24-4434-b6f2-4ba070e2f308 | cpp | tensorflow/tensorflow | reduce_scatter_reassociate | third_party/xla/xla/service/reduce_scatter_reassociate.cc | third_party/xla/xla/service/reduce_scatter_reassociate_test.cc | #include "xla/service/reduce_scatter_reassociate.h"
#include <optional>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
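// Two reduce-scatters can be reassociated only when they agree on the
// collective key (replica groups, channel id, and so on), perform the same
// reduction as the surrounding op, and scatter along the same dimension.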
bool AreCompatible(const HloReduceScatterInstruction *rs0,
const HloReduceScatterInstruction *rs1,
ReductionKind op_kind) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(rs0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(rs1);
auto kind0 = MatchReductionComputation(rs0->to_apply());
auto dims_match = rs0->scatter_dimension() == rs1->scatter_dimension();
return key0 && key1 && kind0 && *key0 == *key1 && kind0 == op_kind &&
dims_match;
}
}
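// Rewrites op(reduce-scatter(x), reduce-scatter(y)) into
// reduce-scatter(op(x, y)) when the two collectives are compatible and have
// no other users, halving the number of reduce-scatter operations.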
absl::StatusOr<bool> ReduceScatterReassociate::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedCollective(
*module, HloOpcode::kReduceScatter)) {
VLOG(1)
<< "Skip ReduceScatterReassociate because the module contains reduce-"
"scatter with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
if (!kind || inst->operand(0)->opcode() != HloOpcode::kReduceScatter ||
inst->operand(1)->opcode() != HloOpcode::kReduceScatter ||
!inst->shape().IsArray()) {
continue;
}
auto *rs0 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(0));
auto *rs1 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(1));
if (!AreCompatible(rs0, rs1, *kind)) {
VLOG(2) << "Reduce-Scatter operations are not compatible, skipping";
continue;
}
if (rs0->user_count() != 1 || rs1->user_count() != 1) {
VLOG(2) << "Reduce-Scatter operations have > 1 users";
continue;
}
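      // Reassociate: apply the reduction to the reduce-scatter inputs
      // first, then issue a single reduce-scatter over the result.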
HloInstruction *new_op =
computation->AddInstruction(inst->CloneWithNewOperands(
rs0->mutable_operand(0)->shape(),
{rs0->mutable_operand(0), rs1->mutable_operand(0)}));
HloInstruction *new_rs = computation->AddInstruction(
rs0->CloneWithNewOperands(inst->shape(), {new_op}));
if (new_rs->channel_id()) {
new_rs->set_channel_id(next_channel_id++);
}
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_rs));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs0));
if (rs0 != rs1) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs1));
}
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_scatter_reassociate.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
class ReduceScatterReassociateTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed = ReduceScatterReassociate().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t ReduceScatterCount(std::unique_ptr<HloModule>& module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kReduceScatter>);
}
};
TEST_F(ReduceScatterReassociateTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, SimpleWithConstrainLayout) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, constrain_layout=true, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, constrain_layout=true, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, SimpleChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
add0 = f32[4] add(rs0, rs1)
add1 = f32[4] add(add0, rs2)
ROOT add2 = f32[4] add(add1, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(
m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
m::Parameter(3))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, SimpleTree) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
add0 = f32[4] add(rs0, rs1)
add1 = f32[4] add(rs2, rs3)
ROOT add2 = f32[4] add(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::ReduceScatter(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Add(m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(ReduceScatterReassociateTest, MismatchOp0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, MismatchOp1) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=max
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, MismatchDimension) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
rs0 = f32[8,8] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[8,8] reduce-scatter(p1), dimensions={1}, to_apply=sum
ROOT add = f32[8,8] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, MismatchReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0}}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, MismatchHasChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, channel_id=3, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, MismatchUseGlobalDeviceId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={{0,1}}, channel_id=4, to_apply=sum
ROOT add = f32[4] add(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, NotSingleUser) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
add = f32[4] add(rs0, rs1)
ROOT t = (f32[4], f32[4]) tuple(rs0, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/false));
}
TEST_F(ReduceScatterReassociateTest, DoubleUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
add = f32[4] add(rs0, rs0)
ROOT c = f32[4] copy(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3bc61d67-4a76-418b-a671-c9c315c7a6bd | cpp | tensorflow/tensorflow | change_op_data_type | third_party/xla/xla/service/change_op_data_type.cc | third_party/xla/xla/service/change_op_data_type_test.cc | #include "xla/service/change_op_data_type.h"
#include <optional>
#include "xla/service/hlo_creation_utils.h"
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_contraction_rewriter.h"
#endif
namespace xla {
namespace {
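// Returns the element type shared by all of `instr`'s operands, or
// std::nullopt if the operands do not all have the same element type.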
std::optional<PrimitiveType> GetUniformOperandType(
const HloInstruction* instr) {
std::optional<PrimitiveType> type;
for (const HloInstruction* operand : instr->operands()) {
if (!type.has_value()) {
type = operand->shape().element_type();
} else if (operand->shape().element_type() != type.value()) {
return std::nullopt;
}
}
return type;
}
}
absl::StatusOr<bool> ChangeOpDataType::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
HloCloner default_cloner = [](const HloInstruction* inst, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
return inst->CloneWithNewOperands(shape, operands);
};
HloCloner cloner = cloner_ ? cloner_ : default_cloner;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
std::optional<PrimitiveType> operand_type = GetUniformOperandType(instr);
if (!op_matcher_(instr) || !operand_type.has_value() ||
!instr->shape().IsArray() ||
instr->opcode() == HloOpcode::kParameter) {
continue;
}
const PrimitiveType from_type = *operand_type;
auto it = to_type_map_.find(from_type);
if (it == to_type_map_.end()) {
continue;
}
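      // Skip instructions that the oneDNN contraction rewriter is going to
      // handle; changing their element type here would block that rewrite.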
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
if (cpu::OneDnnContractionRewriter::ShouldRewriteInstr(instr, true)) {
continue;
}
#endif
const PrimitiveType to_type = it->second;
absl::InlinedVector<HloInstruction*, 8> new_operands;
for (HloInstruction* operand : instr->mutable_operands()) {
new_operands.push_back(MakeConvertToHlo(operand, to_type));
}
Shape new_shape = instr->shape();
new_shape.set_element_type(to_type);
HloInstruction* new_instr =
comp->AddInstruction(cloner(instr, new_shape, new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(
instr, MakeConvertToHlo(new_instr, from_type)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/change_op_data_type.h"
#include <string>
#include <tuple>
#include <vector>
#include "absl/types/span.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class ChangeOpDataTypeTest : public HloTestBase {
public:
ChangeOpDataTypeTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
};
TEST_F(ChangeOpDataTypeTest, Simple) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
}
TEST_F(ChangeOpDataTypeTest, AllTypesMustBeSame) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = f16[1] dynamic-slice(f16[10] parameter(0), s32[1] parameter(1)), dynamic_slice_sizes={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ChangeOpDataTypeTest, DotAndConv) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
dot = f16[10,10] dot(f16[10,10] parameter(0), f16[10,10] parameter(1)),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
conv = f16[1,2,1] convolution(f16[1,2,1] parameter(2), f16[1,1,1] parameter(3)),
window={size=1}, dim_labels=b0f_0io->b0f
root = tuple(dot, conv)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(
F16, F32, HloPredicateIsOp<HloOpcode::kDot, HloOpcode::kConvolution>);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Convert(
m::Dot(m::Convert(m::Parameter(0)).WithShape(F32, {10, 10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10, 10})))
.WithShape(F16, {10, 10}),
m::Convert(m::Convolution(
m::Convert(m::Parameter(2)).WithShape(F32, {1, 2, 1}),
m::Convert(m::Parameter(3)).WithShape(F32, {1, 1, 1})))
.WithShape(F16, {1, 2, 1}))));
}
TEST_F(ChangeOpDataTypeTest, SimpleWithCloner) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
int count = 0;
ChangeOpDataType::HloCloner cloner =
[&count](const HloInstruction* instr, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
count++;
return instr->CloneWithNewOperands(shape, operands);
};
ChangeOpDataType pass(F16, F32, matcher, cloner);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_EQ(count, 1);
}
TEST_F(ChangeOpDataTypeTest, SimpleWithMultipleTypes) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
op1 = add(f16[10] parameter(0), f16[10] parameter(1))
op2 = add(u16[10] parameter(2), u16[10] parameter(3))
ROOT tup = (f16[10], u16[10]) tuple(op1, op2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
ChangeOpDataType pass({{F16, F32}, {U16, U32}}, matcher);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(root->operand_count(), 2);
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
EXPECT_THAT(
root->operand(1),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(2)).WithShape(U32, {10}),
m::Convert(m::Parameter(3)).WithShape(U32, {10})))
.WithShape(U16, {10})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2292926a-6678-41a1-8dd4-0c5f8f61af9c | cpp | tensorflow/tensorflow | collective_permute_decomposer | third_party/xla/xla/service/collective_permute_decomposer.cc | third_party/xla/xla/service/collective_permute_decomposer_test.cc | #include "xla/service/collective_permute_decomposer.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
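// Returns true if the (source, target) pairs contain a cycle, e.g.
// {{0,1},{1,0}}; cyclic collective-permutes are not decomposed.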
bool HasCycles(const SourceTargetPairs& pairs) {
GraphCycles graph;
absl::flat_hash_map<int64_t, int32_t> replica_to_node_id;
auto get_node_id = [&](int64_t replica) {
auto it_and_inserted = replica_to_node_id.emplace(replica, -1);
auto it = it_and_inserted.first;
auto inserted = it_and_inserted.second;
if (inserted) {
it->second = graph.NewNode();
}
return it->second;
};
for (auto pair : pairs) {
auto source = get_node_id(pair.first);
auto target = get_node_id(pair.second);
VLOG(3) << "See source " << source << " -> target " << target;
if (!graph.InsertEdge(source, target)) {
VLOG(3) << "Detected cycles";
return true;
}
}
return false;
}
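// A collective-permute is decomposed into Send/Recv only if it has a
// channel id, produces an array result of at least `threshold_in_bytes`,
// and its source-target pairs do not form a cycle.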
bool ShouldDecompose(const HloCollectivePermuteInstruction& collective_permute,
int64_t threshold_in_bytes) {
if (!collective_permute.channel_id().has_value()) {
return false;
}
const Shape& result_shape = collective_permute.shape();
if (!result_shape.IsArray()) {
return false;
}
if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) {
return false;
}
return !HasCycles(collective_permute.source_target_pairs());
}
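// Returns true if the collective-permute may later be pipelined: its
// operand comes directly from a get-tuple-element of a parameter.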
bool MayPipeline(const HloCollectivePermuteInstruction& collective_permute) {
const HloInstruction* data = collective_permute.operand(0);
return (data->opcode() == HloOpcode::kGetTupleElement &&
data->operand(0)->opcode() == HloOpcode::kParameter);
}
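// Rewrites the collective-permute into recv/send plus recv-done/send-done
// pairs sharing its channel id, records the source-target pairs as a
// frontend attribute, and adds a send -> recv-done control edge so the
// pair is not reordered. A non-empty `pipeline_decision` is attached as
// the _xla_send_recv_pipeline attribute.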
absl::Status DecomposeCollectivePermute(
HloCollectivePermuteInstruction* collective_permute,
HloComputation* computation, const std::string& pipeline_decision) {
int64_t channel_id = collective_permute->channel_id().value();
HloInstruction* data = collective_permute->mutable_operand(0);
const Shape& data_shape = data->shape();
const OpMetadata& metadata = collective_permute->metadata();
const xla::FrontendAttributes& old_attributes =
collective_permute->frontend_attributes();
xla::FrontendAttributes attributes;
std::string source_target_pairs_string =
"{" +
absl::StrJoin(collective_permute->source_target_pairs(), ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
attributes.mutable_map()->insert(old_attributes.map().begin(),
old_attributes.map().end());
(*attributes.mutable_map())[kSendRecvSourceTargetPairsAttr] =
source_target_pairs_string;
HloInstruction* after_all =
computation->AddInstruction(HloInstruction::CreateToken());
HloInstruction* recv = computation->AddInstruction(
HloInstruction::CreateRecv(data_shape, after_all, channel_id));
recv->add_frontend_attributes(attributes);
recv->set_metadata(metadata);
HloInstruction* send = computation->AddInstruction(
HloInstruction::CreateSend(data, after_all, channel_id));
send->add_frontend_attributes(attributes);
send->set_metadata(metadata);
HloInstruction* recv_done =
computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
HloInstruction* send_done =
computation->AddInstruction(HloInstruction::CreateSendDone(send));
TF_RETURN_IF_ERROR(send->AddControlDependencyTo(recv_done));
HloInstruction* recv_data = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(recv_done, 0));
TF_RETURN_IF_ERROR(collective_permute->ReplaceAllUsesWith(recv_data));
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(collective_permute));
  if (!pipeline_decision.empty()) {
    xla::FrontendAttributes pipeline_attributes;
    (*pipeline_attributes.mutable_map())[kSendRecvPipelineAttr] =
        pipeline_decision;
    send->add_frontend_attributes(pipeline_attributes);
    send_done->add_frontend_attributes(pipeline_attributes);
    recv->add_frontend_attributes(pipeline_attributes);
    recv_done->add_frontend_attributes(pipeline_attributes);
  }
return absl::OkStatus();
}
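// Returns true if `backedge` is (n-1, 0) and `others` are the pairs
// (i, i+1) for i in [0, n-1), i.e. together they form the forward cycle
// 0 -> 1 -> ... -> n-1 -> 0.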
bool IsForwardCycle(const SourceTargetPair& backedge,
const SourceTargetPairs& others) {
int64_t num_pairs = others.size() + 1;
if (backedge.first != num_pairs - 1 || backedge.second != 0) {
return false;
}
for (int64_t i = 0; i < num_pairs - 1; ++i) {
const SourceTargetPair& pair = others[i];
if (pair.first != i || pair.second != i + 1) {
return false;
}
}
return true;
}
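// Returns true if `backedge` is (0, n-1) and `others` are the pairs
// (i+1, i) for i in [0, n-1), i.e. together they form the backward cycle
// 0 -> n-1 -> ... -> 1 -> 0.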
bool IsBackwardCycle(const SourceTargetPair& backedge,
const SourceTargetPairs& others) {
int64_t num_pairs = others.size() + 1;
if (backedge.first != 0 || backedge.second != num_pairs - 1) {
return false;
}
for (int64_t i = 0; i < num_pairs - 1; ++i) {
const SourceTargetPair& pair = others[i];
if (pair.first != i + 1 || pair.second != i) {
return false;
}
}
return true;
}
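// Checks whether `cp0` and `cp1` together form a forward or backward
// cycle, with one of them carrying the single backedge pair. If so,
// returns them ordered as (backedge collective-permute, other).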
std::optional<std::pair<HloCollectivePermuteInstruction*,
HloCollectivePermuteInstruction*>>
CheckCyclePatterns(HloCollectivePermuteInstruction* cp0,
HloCollectivePermuteInstruction* cp1) {
const SourceTargetPairs& cp0_pairs = cp0->source_target_pairs();
const SourceTargetPairs& cp1_pairs = cp1->source_target_pairs();
if (cp0_pairs.size() == 1) {
if (IsForwardCycle(cp0_pairs.front(), cp1_pairs) ||
IsBackwardCycle(cp0_pairs.front(), cp1_pairs)) {
return std::make_pair(cp0, cp1);
}
}
if (cp1_pairs.size() == 1) {
if (IsForwardCycle(cp1_pairs.front(), cp0_pairs) ||
IsBackwardCycle(cp1_pairs.front(), cp0_pairs)) {
return std::make_pair(cp1, cp0);
}
}
return std::nullopt;
}
}
absl::StatusOr<bool> CollectivePermuteDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> all_computations =
module->MakeComputationPostOrder(execution_threads);
absl::flat_hash_set<HloComputation*> while_bodies;
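  // Visit computations callers-first so that while bodies are recorded
  // before they are processed; inside a while body, additionally look for
  // a pair of collective-permutes forming a cycle that can be pipelined.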
for (auto iter = all_computations.rbegin(); iter != all_computations.rend();
++iter) {
HloComputation* computation = *iter;
bool may_pipeline = while_bodies.contains(computation);
std::vector<HloCollectivePermuteInstruction*> cps_to_decompose;
HloCollectivePermuteInstruction* cp0_to_pipeline = nullptr;
HloCollectivePermuteInstruction* cp1_to_pipeline = nullptr;
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kWhile) {
while_bodies.insert(hlo->while_body());
continue;
}
if (hlo->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
HloCollectivePermuteInstruction* cp =
Cast<HloCollectivePermuteInstruction>(hlo);
if (!ShouldDecompose(*cp, threshold_in_bytes_)) {
continue;
}
cps_to_decompose.push_back(cp);
if (!while_bodies.contains(computation) || !may_pipeline) {
continue;
}
if (cp0_to_pipeline != nullptr && cp1_to_pipeline != nullptr) {
continue;
}
if (!MayPipeline(*cp)) {
continue;
}
if (cp0_to_pipeline == nullptr) {
cp0_to_pipeline = cp;
continue;
}
auto optional_pair = CheckCyclePatterns(cp0_to_pipeline, cp);
if (optional_pair.has_value()) {
cp0_to_pipeline = optional_pair.value().first;
cp1_to_pipeline = optional_pair.value().second;
}
}
for (HloCollectivePermuteInstruction* cp : cps_to_decompose) {
std::string pipeline_decision;
if (cp0_to_pipeline == cp) {
pipeline_decision = "0";
} else if (cp1_to_pipeline == cp) {
pipeline_decision = "1";
}
TF_RETURN_IF_ERROR(
DecomposeCollectivePermute(cp, computation, pipeline_decision));
}
if (!cps_to_decompose.empty()) {
changed = true;
}
}
return changed;
}
} | #include "xla/service/collective_permute_decomposer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
namespace op = xla::testing::opcode_matchers;
using CollectivePermuteDecomposerTest = HloTestBase;
TEST_F(CollectivePermuteDecomposerTest, WithCycleNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT cp = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,1}, {1,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteDecomposerTest, WithContextDataNotTransformed) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT cp = (u32[], u32[], u32[], u32[]) collective-permute(p), channel_id=1,
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteDecomposerTest, TransformedExplicitChannelId) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT cp = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto check_metadata = [](const HloInstruction* inst) {
EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
EXPECT_EQ(inst->metadata().source_line(), 35);
};
auto check_not_pipelined = [](const HloInstruction* instr) {
const FrontendAttributes& attributes = instr->frontend_attributes();
EXPECT_EQ(attributes.map().end(),
attributes.map().find(kSendRecvPipelineAttr));
};
HloInstruction* after_all = FindInstruction(module.get(), "after-all");
HloInstruction* recv = FindInstruction(module.get(), "recv");
EXPECT_EQ(recv->operand(0), after_all);
EXPECT_EQ(recv->channel_id().value(), 1);
EXPECT_THAT(
recv->ToString(),
HasSubstr(
"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}"));
check_metadata(recv);
check_not_pipelined(recv);
HloInstruction* recv_done = FindInstruction(module.get(), "recv-done");
EXPECT_EQ(recv_done->operand(0), recv);
HloInstruction* send = FindInstruction(module.get(), "send");
EXPECT_EQ(send->operand(1), after_all);
EXPECT_EQ(send->channel_id().value(), 1);
EXPECT_THAT(
send->ToString(),
HasSubstr(
"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}"));
check_metadata(send);
check_not_pipelined(send);
HloInstruction* send_done = FindInstruction(module.get(), "send-done");
EXPECT_EQ(send_done->operand(0), send);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::GetTupleElement(recv_done, 0));
}
TEST_F(CollectivePermuteDecomposerTest, NotTransformedDefaultChannelId) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT cp = u32[] collective-permute(p),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteDecomposerTest, ThresholdNotTransformed) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT cp = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(8);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteDecomposerTest, Pipeline1) {
const char* const kModuleStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
recv-data = u32[2] collective-permute(send-data), channel_id=1,
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
frontend_attributes={_xla_other_attribute="xyz"}
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* recv = FindInstruction(module.get(), "recv");
EXPECT_EQ(recv->channel_id().value(), 1);
EXPECT_THAT(
recv->ToString(),
HasSubstr(
"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}"));
EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
EXPECT_THAT(recv->ToString(), HasSubstr("_xla_other_attribute=\"xyz\""));
HloInstruction* recv_done = FindInstruction(module.get(), "recv-done");
EXPECT_THAT(recv_done->ToString(),
HasSubstr("_xla_send_recv_pipeline=\"0\""));
HloInstruction* send = FindInstruction(module.get(), "send");
EXPECT_EQ(send->channel_id().value(), 1);
EXPECT_THAT(
send->ToString(),
HasSubstr(
"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}"));
EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
EXPECT_THAT(send->ToString(), HasSubstr("_xla_other_attribute=\"xyz\""));
HloInstruction* send_done = FindInstruction(module.get(), "send-done");
EXPECT_THAT(send_done->ToString(),
HasSubstr("_xla_send_recv_pipeline=\"0\""));
EXPECT_FALSE(recv_done->control_predecessors().empty());
EXPECT_EQ(recv_done->control_predecessors()[0], send);
}
TEST_F(CollectivePermuteDecomposerTest, ForwardPipeline2) {
const char* const kModuleStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,
source_target_pairs={{3,0}}
recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,
source_target_pairs={{0,1}, {1,2}, {2,3}}
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* recv = FindInstruction(module.get(), "recv");
EXPECT_EQ(recv->channel_id().value(), 1);
EXPECT_THAT(recv->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{3,0}}"));
EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
HloInstruction* send = FindInstruction(module.get(), "send");
EXPECT_THAT(send->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{3,0}}"));
EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
EXPECT_EQ(recv1->channel_id().value(), 2);
EXPECT_THAT(
recv1->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}"));
EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
HloInstruction* recv_done1 = FindInstruction(module.get(), "recv-done.1");
EXPECT_THAT(recv_done1->ToString(),
HasSubstr("_xla_send_recv_pipeline=\"1\""));
HloInstruction* send1 = FindInstruction(module.get(), "send.1");
EXPECT_THAT(
send1->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}"));
EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
HloInstruction* send_done1 = FindInstruction(module.get(), "send-done.1");
EXPECT_THAT(send_done1->ToString(),
HasSubstr("_xla_send_recv_pipeline=\"1\""));
}
TEST_F(CollectivePermuteDecomposerTest, ForwardPipelineWithMatmul) {
const char* const kModuleStr = R"(
HloModule test
while_body {
inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)
iter = u32[] get-tuple-element(inputs), index=0
iter_increment = u32[] constant(1)
next_iter = u32[] add(iter, iter_increment)
partition-id = u32[] partition-id()
zero = u32[] constant(0)
compare = pred[] compare(partition-id, zero), direction=EQ
broadcast = pred[2,2] broadcast(compare), dimensions={}
weights = f32[2,2] get-tuple-element(inputs), index=2
data = f32[2,2] get-tuple-element(inputs), index=1
cp_back = f32[2,2] collective-permute(data), channel_id=1,
source_target_pairs={{3,0}},
frontend_attributes={_xla_send_recv_validation="{{3,10}}"}
cp_forward = f32[2,2] collective-permute(data), channel_id=2,
source_target_pairs={{0,1},{1,2},{2,3}},
frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9}}"}
select = f32[2,2] select(broadcast, cp_back, cp_forward)
matmul = f32[2,2] dot(weights, select), lhs_contracting_dims={1},
rhs_contracting_dims={0}
ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights)
}
while_cond {
inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)
iter = u32[] get-tuple-element(inputs), index=0
max_iter = u32[] constant(3)
ROOT compare = pred[] compare(iter, max_iter), direction=LT
}
ENTRY test_computation {
start_iter = u32[] constant(0)
input_data = f32[2,2] parameter(0)
input_weights = f32[2,2] parameter(1)
input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data,
input_weights)
while_result = (u32[], f32[2,2], f32[2,2]) while(input),
condition=while_cond, body=while_body
ROOT data_out = f32[2,2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
HloModule* transformed_module = module.get();
HloComputation* while_body =
FindComputation(transformed_module, "while_body");
HloInstruction* recv_bwd = hlo_query::FindInstruction(while_body, "recv");
EXPECT_EQ(recv_bwd->channel_id().value(), 1);
auto recv_bwd_frontend_attributes = recv_bwd->frontend_attributes().map();
EXPECT_EQ(recv_bwd_frontend_attributes.size(), 3);
EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvValidationAttr),
"{{3,10}}");
EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvPipelineAttr), "0");
EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),
"{{3,0}}");
HloInstruction* send_bwd = hlo_query::FindInstruction(while_body, "send");
auto send_bwd_frontend_attributes = send_bwd->frontend_attributes().map();
EXPECT_THAT(send_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),
"{{3,0}}");
HloInstruction* recv_fwd = hlo_query::FindInstruction(while_body, "recv.1");
EXPECT_EQ(recv_fwd->channel_id().value(), 2);
auto recv_fwd_frontend_attributes = recv_fwd->frontend_attributes().map();
EXPECT_EQ(recv_fwd_frontend_attributes.size(), 3);
EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvPipelineAttr), "1");
EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),
"{{0,1},{1,2},{2,3}}");
HloInstruction* send_fwd = hlo_query::FindInstruction(while_body, "send.1");
auto send_fwd_frontend_attributes = send_fwd->frontend_attributes().map();
EXPECT_EQ(send_fwd_frontend_attributes.size(), 3);
EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvPipelineAttr), "1");
EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),
"{{0,1},{1,2},{2,3}}");
EXPECT_NE(while_body, nullptr);
HloInstruction* recv_done_fwd =
hlo_query::FindInstruction(while_body, "recv-done");
HloInstruction* recv_done_bwd =
hlo_query::FindInstruction(while_body, "recv-done.1");
EXPECT_EQ(recv_done_fwd->control_predecessors()[0], send_bwd);
EXPECT_EQ(recv_done_bwd->control_predecessors()[0], send_fwd);
}
TEST_F(CollectivePermuteDecomposerTest, BackwardPipeline2) {
const char* const kModuleStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,
source_target_pairs={{1,0},{2,1},{3,2}}
recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,
source_target_pairs={{0,3}}
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=NE
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
CollectivePermuteDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* recv = FindInstruction(module.get(), "recv");
EXPECT_EQ(recv->channel_id().value(), 1);
EXPECT_THAT(
recv->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}"));
EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
HloInstruction* send = FindInstruction(module.get(), "send");
EXPECT_THAT(
send->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}"));
EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
EXPECT_EQ(recv1->channel_id().value(), 2);
EXPECT_THAT(recv1->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{0,3}}"));
EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
HloInstruction* send1 = FindInstruction(module.get(), "send.1");
EXPECT_THAT(send1->ToString(),
HasSubstr("_xla_send_recv_source_target_pairs={{0,3}}"));
EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50cd28dd-fb5f-48ac-8b47-b0f528cfc036 | cpp | tensorflow/tensorflow | collective_transformation_reorderer | third_party/xla/xla/service/collective_transformation_reorderer.cc | third_party/xla/xla/service/collective_transformation_reorderer_test.cc | #include "xla/service/collective_transformation_reorderer.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
struct CollectiveTransformation {
HloInstruction* hlo;
int64_t transformed_collective_dimension;
};
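// Finds a prefix chain of single-user reshapes following `all_gather` that
// preserve the all-gather dimension (only reshaping the per-shard
// dimensions around it); such reshapes can be moved before the all-gather.
// Returns std::nullopt if no such reshape exists.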
std::optional<std::vector<CollectiveTransformation>>
GetAllGatherTransformations(HloInstruction* all_gather) {
std::vector<HloInstruction*> transformation_hlos;
{
HloInstruction* transformation_hlo = all_gather;
bool found_unsupported_transformation = false;
while (transformation_hlo->user_count() == 1 &&
!found_unsupported_transformation) {
transformation_hlo = transformation_hlo->users()[0];
switch (transformation_hlo->opcode()) {
case HloOpcode::kReshape: {
transformation_hlos.push_back(transformation_hlo);
break;
}
default:
found_unsupported_transformation = true;
}
}
}
if (transformation_hlos.empty()) {
return std::nullopt;
}
auto get_reshaped_all_gather_dimension =
[](const Shape& all_gather_shape, int64_t all_gather_dimension,
HloInstruction* transformation_hlo) -> std::optional<int64_t> {
    int64_t all_gather_num_strides = absl::c_accumulate(
        all_gather_shape.dimensions().subspan(0, all_gather_dimension),
        int64_t{1}, [](int64_t product, int64_t dimension_size) {
          return product * dimension_size;
        });
int64_t reshaped_all_gather_dimension = 0;
int64_t reshaped_num_strides = 1;
while (reshaped_all_gather_dimension <
transformation_hlo->shape().dimensions_size() &&
reshaped_num_strides < all_gather_num_strides) {
reshaped_num_strides *=
transformation_hlo->shape().dimensions(reshaped_all_gather_dimension);
++reshaped_all_gather_dimension;
}
if (reshaped_num_strides != all_gather_num_strides) {
return std::nullopt;
}
if (transformation_hlo->shape().dimensions(reshaped_all_gather_dimension) !=
all_gather_shape.dimensions(all_gather_dimension)) {
return std::nullopt;
}
return reshaped_all_gather_dimension;
};
std::vector<CollectiveTransformation> transformations;
HloAllGatherInstruction* all_gather_instruction =
DynCast<HloAllGatherInstruction>(all_gather);
  CHECK(all_gather_instruction != nullptr);
  Shape all_gather_shape = all_gather_instruction->shape();
  int64_t all_gather_dimension =
      all_gather_instruction->all_gather_dimension();
for (HloInstruction* transformation_hlo : transformation_hlos) {
bool found_unsupported_transformation = false;
switch (transformation_hlo->opcode()) {
case HloOpcode::kReshape: {
std::optional<int64_t> reshaped_all_gather_dimension =
get_reshaped_all_gather_dimension(
all_gather_shape, all_gather_dimension, transformation_hlo);
if (reshaped_all_gather_dimension.has_value()) {
transformations.push_back(
{transformation_hlo, *reshaped_all_gather_dimension});
all_gather_shape = transformation_hlo->shape();
all_gather_dimension = *reshaped_all_gather_dimension;
} else {
found_unsupported_transformation = true;
}
break;
}
default:
return std::nullopt;
}
if (found_unsupported_transformation) {
break;
}
}
if (transformations.empty()) {
return std::nullopt;
}
return transformations;
}
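// Collects the chain of single-user reshapes feeding `all_reduce`; since
// all-reduce is elementwise, those reshapes can be moved after it. Returns
// an empty vector if the all-reduce has a constrained layout.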
std::vector<HloInstruction*> GetAllReduceTransformations(
HloInstruction* all_reduce) {
HloAllReduceInstruction* all_reduce_instruction =
DynCast<HloAllReduceInstruction>(all_reduce);
CHECK_NE(all_reduce_instruction, nullptr);
if (all_reduce_instruction->constrain_layout()) {
return {};
}
std::vector<HloInstruction*> transformation_hlos;
HloInstruction* transformation_hlo = all_reduce->mutable_operand(0);
while (transformation_hlo->opcode() == HloOpcode::kReshape &&
transformation_hlo->user_count() == 1) {
transformation_hlos.push_back(transformation_hlo);
transformation_hlo = transformation_hlo->mutable_operand(0);
}
return transformation_hlos;
}
}
absl::StatusOr<bool>
CollectiveTransformationReorder::ReorderAllGatherTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HloInstructionMap<std::vector<CollectiveTransformation>>
all_gather_to_transformations;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kAllGather) {
if (instruction->operand_count() != 1) {
continue;
}
std::optional<std::vector<CollectiveTransformation>>
all_gather_transformations =
GetAllGatherTransformations(instruction);
if (all_gather_transformations.has_value()) {
all_gather_to_transformations[instruction] =
*std::move(all_gather_transformations);
}
}
}
}
if (all_gather_to_transformations.empty()) {
return false;
}
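  // Reshapes `all_gather_operand` to the transformed shape, with the
  // all-gather dimension scaled back down to its per-shard size.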
auto reshape_all_gather_operand =
[](HloInstruction* all_gather_operand,
int64_t original_all_gather_dimension,
const CollectiveTransformation& transformation) {
Shape reshaped_all_gather_operand_shape = transformation.hlo->shape();
int64_t operand_all_gather_dimension_size =
all_gather_operand->shape().dimensions(
original_all_gather_dimension);
reshaped_all_gather_operand_shape.set_dimensions(
transformation.transformed_collective_dimension,
operand_all_gather_dimension_size);
HloComputation* computation = all_gather_operand->parent();
return computation->AddInstruction(HloInstruction::CreateReshape(
reshaped_all_gather_operand_shape, all_gather_operand));
};
for (auto& [instruction, transformations] : all_gather_to_transformations) {
HloAllGatherInstruction* all_gather =
DynCast<HloAllGatherInstruction>(instruction);
int64_t all_gather_dimension = all_gather->all_gather_dimension();
int64_t original_all_gather_dimension_size =
all_gather->shape().dimensions(all_gather_dimension);
HloInstruction* all_gather_operand = instruction->mutable_operand(0);
for (const CollectiveTransformation& transformation : transformations) {
all_gather_operand = reshape_all_gather_operand(
all_gather_operand, all_gather_dimension, transformation);
all_gather_dimension = transformation.transformed_collective_dimension;
}
Shape new_all_gather_shape = all_gather_operand->shape();
new_all_gather_shape.set_dimensions(all_gather_dimension,
original_all_gather_dimension_size);
HloComputation* computation = all_gather_operand->parent();
HloInstruction* new_all_gather =
computation->AddInstruction(HloInstruction::CreateAllGather(
new_all_gather_shape, {all_gather_operand}, all_gather_dimension,
all_gather->device_list(), all_gather->constrain_layout(),
all_gather->channel_id(), all_gather->use_global_device_ids()));
TF_RETURN_IF_ERROR(
transformations.back().hlo->ReplaceAllUsesWith(new_all_gather));
if (computation->root_instruction() == transformations.back().hlo) {
computation->set_root_instruction(new_all_gather);
}
}
return true;
}
absl::StatusOr<bool>
CollectiveTransformationReorder::ReorderAllReduceTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HloInstructionMap<std::vector<HloInstruction*>> all_reduce_to_transformations;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kAllReduce) {
if (instruction->user_count() != 1 ||
computation->root_instruction() == instruction) {
continue;
}
std::vector<HloInstruction*> reshapes =
GetAllReduceTransformations(instruction);
if (reshapes.empty()) {
continue;
}
all_reduce_to_transformations[instruction] = std::move(reshapes);
}
}
}
if (all_reduce_to_transformations.empty()) {
return false;
}
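  // Move each reshape chain after its all-reduce: run the all-reduce on
  // the pre-reshape operand and re-apply the reshapes to its result.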
for (auto& [inst, reshapes] : all_reduce_to_transformations) {
HloComputation* computation = inst->parent();
HloAllReduceInstruction* all_reduce =
DynCast<HloAllReduceInstruction>(inst);
CHECK(!reshapes.empty());
HloInstruction* cur_operand = reshapes.back()->mutable_operand(0);
HloInstruction* new_all_reduce =
computation->AddInstruction(HloInstruction::CreateAllReduce(
cur_operand->shape(), {cur_operand}, all_reduce->to_apply(),
all_reduce->device_list(), all_reduce->constrain_layout(),
all_reduce->channel_id(), all_reduce->use_global_device_ids()));
cur_operand = new_all_reduce;
for (int64_t i = reshapes.size() - 1; i >= 0; --i) {
cur_operand = computation->AddInstruction(
HloInstruction::CreateReshape(reshapes[i]->shape(), cur_operand));
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(all_reduce, cur_operand));
}
return true;
}
absl::StatusOr<bool> CollectiveTransformationReorder::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(bool ag_changed, ReorderAllGatherTransformations(
module, execution_threads));
TF_ASSIGN_OR_RETURN(bool ar_changed, ReorderAllReduceTransformations(
module, execution_threads));
if (ag_changed || ar_changed) {
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
}
return ag_changed || ar_changed;
}
} | #include "xla/service/collective_transformation_reorderer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class CollectiveTransformationReordererTest : public HloTestBase {
public:
absl::StatusOr<bool> RunCollectiveTransformationReorderer(HloModule* module) {
CollectiveTransformationReorder reorderer;
return reorderer.Run(module, {});
}
};
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardAfterAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,1024] parameter(0)
all-gather = bf16[8,32,1024] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[8,32,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardBeforeAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,4,1024] parameter(0)
all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[2048,32,1024] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardBeforeAndAfterAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,4,1024] parameter(0)
all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[2048,32,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
TEST_F(CollectiveTransformationReordererTest, ReshapeAcrossShards) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,1,8,128] parameter(0)
all-gather = bf16[8,8,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[64,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveTransformationReordererTest, MergeAllGatherDimensionWithNext) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,8,16,16] parameter(0)
all-gather = bf16[64,8,16,16] all-gather(param), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[512,16,16] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveTransformationReordererTest,
MergeAllGatherDimensionWithPrevious) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,8,16,16] parameter(0)
all-gather = bf16[8,64,16,16] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[512,16,16] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveTransformationReordererTest, AllReduceSingleReshape) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Reshape(op::AllReduce(op::Parameter())),
op::Constant(), op::Constant(), op::Constant()));
}
TEST_F(CollectiveTransformationReordererTest, AllReduceTwoReshapes) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,3072,2] parameter(0)
reshape.1 = bf16[16384,6144] reshape(param)
reshape.2 = bf16[1,16384,6144] reshape(reshape.1)
all-reduce = bf16[1,16384,6144] all-reduce(reshape.2), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Reshape(op::Reshape(op::AllReduce(op::Parameter()))),
op::Constant(), op::Constant(), op::Constant()));
}
TEST_F(CollectiveTransformationReordererTest, AllReduceReshapeWithTwoUsers) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
copy = bf16[1,16384,6144] copy(reshape)
ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectiveTransformationReordererTest, AllReduceWithTwoUsersReshape) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
copy = bf16[1,16384,6144] copy(all-reduce)
ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
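// all-reduce instructions with constrain_layout=true must not be reordered.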
TEST_F(CollectiveTransformationReordererTest, AllReduceConstrainLayout) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, constrain_layout=true, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df627059-2ef8-47c7-880a-5779a2e8d0a0 | cpp | tensorflow/tensorflow | hlo_unstacker | third_party/xla/xla/service/hlo_unstacker.cc | third_party/xla/xla/service/hlo_unstacker_test.cc | #include "xla/service/hlo_unstacker.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
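// Kinds of fusion patterns the unstacker recognizes. For the dynamic-slice
// kinds, the hoisted per-layer read can be emitted as a plain slice; `Other`
// falls back to cloning the unstacking computation.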
enum class PatternType {
DSFusionNoBitcastPattern,
DSFusionPattern,
NestedDSFusionPattern,
Other,
};
static std::string PatternTypeToString(PatternType pattern_type) {
switch (pattern_type) {
case PatternType::DSFusionNoBitcastPattern:
return "DSFusionNoBitcastPattern";
case PatternType::DSFusionPattern:
return "DSFusionPattern";
case PatternType::NestedDSFusionPattern:
return "NestedDSFusionPattern";
case PatternType::Other:
return "Other";
}
}
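// Everything a pattern matcher reports about a match: the matched fusion, the
// tuple shape the stacked operand unstacks into, the computation used to
// materialize one slice, and all instructions covered by the match.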
struct PatternInfo {
PatternType type;
std::vector<const HloInstruction*> unstacked_instrs;
const HloInstruction* instr;
Shape unstacked_shape;
HloComputation* unstacking_computation;
std::string ToString() const {
if (unstacking_computation == nullptr) {
return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
"instr: \n\t", instr->name(), "\n", "shape: \n\t",
unstacked_shape.ToString(true));
} else {
return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
"instr: \n\t", instr->name(), "\n", "shape: \n\t",
unstacked_shape.ToString(true), "\n", "comp: \n",
unstacking_computation->name());
}
}
};
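// Module-wide state shared across unstacker instances: the unrollable while
// bodies (with their loop configs), the registered (pattern, handler) pairs,
// and a predicate deciding whether a hoisted slice may stay unfused.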
struct UnstackerMetadata {
static absl::StatusOr<UnstackerMetadata> Create(
HloModule* module, std::function<bool(HloInstruction*)> unfuse_slice) {
UnstackerMetadata metadata;
TF_ASSIGN_OR_RETURN(
bool prepared,
WhileLoopUnroller::PrepareModuleForUnrolling(module, {}));
if (prepared) {
VLOG(3) << "Prepared module: " << module->name() << " for unstacking.";
}
    std::vector<std::pair<HloInstruction*, WhileLoopConfig>> loops =
        WhileLoopUnroller::GetUnrollableLoops(
            module, /*execution_threads=*/{}, /*unroll_config=*/std::nullopt);
for (const auto& [instr, while_loop_config] : loops) {
metadata.unrollable_loop_bodies[instr->while_body()] = while_loop_config;
metadata.bodies[instr->while_body()] = instr;
}
metadata.unfuse_slice = unfuse_slice;
return metadata;
}
absl::flat_hash_map<HloComputation*, WhileLoopConfig> unrollable_loop_bodies;
absl::flat_hash_map<const HloComputation*, HloInstruction*> bodies;
std::vector<
std::pair<std::function<std::optional<PatternInfo>(
const UnstackerMetadata&, const HloInstruction*, int64_t)>,
std::function<absl::Status(HloInstruction*, const Shape&)>>>
custom_handlers;
std::function<bool(HloInstruction*)> unfuse_slice;
};
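// Drives unstacking for a single while-loop operand: runs the registered
// pattern matchers on each fusion user and collects the operand, body, and
// loop changes needed for the rewrite.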
class UnstackerTransformer {
public:
explicit UnstackerTransformer(const UnstackerMetadata& metadata)
: metadata_(metadata) {}
std::vector<const HloInstruction*> HandleInstruction(
const HloInstruction* instr, int64_t changed_idx) {
if (instr->opcode() != HloOpcode::kFusion) {
return {};
}
VLOG(3) << "HandleInstruction(" << instr->shape().ToString()
<< instr->name() << ", " << changed_idx << ")";
for (const auto& [custom_pattern, custom_handler] :
metadata_.custom_handlers) {
std::optional<PatternInfo> stacked_user =
custom_pattern(metadata_, instr, changed_idx);
if (!stacked_user.has_value()) {
continue;
}
PatternInfo& pattern_info = stacked_user.value();
pattern_type_ = pattern_info.type;
VLOG(3) << "PatternInfo:" << "\n" << pattern_info.ToString();
if (pattern_info.unstacking_computation != nullptr &&
unstacking_computation_ != nullptr) {
if (!absl::EqualsIgnoreCase(
pattern_info.unstacking_computation->ToString(
HloPrintOptions::Fingerprint()),
unstacking_computation_->ToString(
HloPrintOptions::Fingerprint()))) {
VLOG(3) << "Seen multiple unstacking computations, cannot handle: "
<< "\n previous computations: \n"
<< unstacking_computation_->ToString(
HloPrintOptions::Fingerprint())
<< "\n current computations: \n"
<< pattern_info.unstacking_computation->ToString(
HloPrintOptions::Fingerprint());
return {};
}
}
if (pattern_info.unstacking_computation != nullptr) {
unstacking_computation_ = pattern_info.unstacking_computation;
}
unstacked_shape_ = std::make_unique<Shape>(pattern_info.unstacked_shape);
unstacked_instrs_.push_back(instr);
std::function<absl::Status()> unstack_wrapper =
[&custom_handler = custom_handler,
pattern_info]() mutable -> absl::Status {
HloInstruction* mutable_dynamic_slicing_fusion =
const_cast<HloInstruction*>(pattern_info.instr);
return custom_handler(mutable_dynamic_slicing_fusion,
pattern_info.unstacked_shape.tuple_shapes(0));
};
body_changes_.push_back(unstack_wrapper);
return pattern_info.unstacked_instrs;
}
return {};
}
const UnstackerMetadata& GetMetadata() const { return metadata_; }
std::vector<const HloInstruction*>& GetUnstackedInstructions() {
return unstacked_instrs_;
}
const Shape* GetUnstackedShape() const { return unstacked_shape_.get(); }
HloComputation* GetUnstackingComputation() const {
return unstacking_computation_;
}
std::vector<std::function<void(const UnstackerTransformer&)>>&
GetLoopChanges() {
return loop_changes_;
}
std::vector<std::function<absl::Status()>>& GetBodyChanges() {
return body_changes_;
}
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>&
GetOperandChanges() {
return operand_changes_;
}
void AddOperandChange(HloInstruction* instr, int64_t index) {
operand_changes_[instr].push_back(index);
}
void AddLoopChange(
std::function<void(const UnstackerTransformer&)> loop_change) {
loop_changes_.push_back(loop_change);
}
PatternType GetPatternType() const { return pattern_type_; }
private:
PatternType pattern_type_;
const UnstackerMetadata& metadata_;
std::unique_ptr<Shape> unstacked_shape_ = nullptr;
HloComputation* unstacking_computation_ = nullptr;
std::vector<std::function<void(const UnstackerTransformer&)>> loop_changes_;
std::vector<std::function<absl::Status()>> body_changes_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> operand_changes_;
std::vector<const HloInstruction*> unstacked_instrs_;
};
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
UnstackerTransformer& unstacker, int64_t index);
bool UnstackWhileOperandAtIndex(
const UnstackerMetadata& metadata, HloInstruction* while_instr,
int64_t index, std::vector<const HloInstruction*>& unstacked_instructions);
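// Walks the users of `gte` in BFS order. Every user must either forward the
// stacked value (get-tuple-element, tuple, while) or be a fusion covered by a
// registered pattern; on success, the operand-index changes of all visited
// instructions are recorded.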
bool PropagateGteShapeChange(HloInstruction* gte,
UnstackerTransformer& unstacker) {
VLOG(5) << "PropagateGteShapeChange(" << gte->name() << ")";
std::vector<const HloInstruction*> handled_instrs;
absl::flat_hash_map<HloInstruction*, int64_t> visited;
std::deque<HloInstruction*> worklist;
worklist.push_back(gte);
visited.insert({gte, gte->tuple_index()});
while (!worklist.empty()) {
HloInstruction* changed_instr_to_propagate = worklist.front();
int64_t changed_operand_index =
FindOrDie(visited, changed_instr_to_propagate);
worklist.pop_front();
for (HloInstruction* user : changed_instr_to_propagate->users()) {
if (ContainsKey(visited, user)) {
continue;
}
if (user->opcode() == HloOpcode::kGetTupleElement) {
if (user->tuple_index() != changed_operand_index) {
continue;
}
visited.insert({user, changed_operand_index});
worklist.push_back(user);
} else if (user->opcode() == HloOpcode::kTuple) {
int64_t use_index = user->operand_index(changed_instr_to_propagate);
        visited.insert({user, use_index});
worklist.push_back(user);
} else if (user->opcode() == HloOpcode::kWhile) {
bool changed_nested_while =
CanUnstackWhileOperand(user, unstacker, changed_operand_index);
if (!changed_nested_while) {
return false;
}
visited.insert({user, changed_operand_index});
worklist.push_back(user);
} else {
if (absl::c_find(handled_instrs, user) != handled_instrs.end()) {
continue;
}
if (user->IsCustomCall("DynamicGte") ||
user->IsCustomCall("DynamicTuple")) {
continue;
}
int64_t use_index = user->operand_index(changed_instr_to_propagate);
std::vector<const HloInstruction*> curr_handled_instrs =
unstacker.HandleInstruction(user, use_index);
if (curr_handled_instrs.empty()) {
VLOG(3) << "Custom unstacker not found for " << user->name();
return false;
}
for (const HloInstruction* instr : curr_handled_instrs) {
for (HloInstruction* handled_instr_user : instr->users()) {
if (user->shape() == gte->shape()) {
visited.insert({handled_instr_user, changed_operand_index});
worklist.push_back(handled_instr_user);
}
}
handled_instrs.push_back(instr);
}
}
}
}
for (const auto& [instr, index] : visited) {
unstacker.AddOperandChange(instr, index);
}
return true;
}
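// Checks that the shape change of `operand` at tuple index `idx` can be
// propagated through every get-tuple-element of that index inside `comp`.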
bool CanPropagateGteShapeChangesInComputation(
const HloComputation* comp, const HloInstruction* operand,
UnstackerTransformer& shape_transformer, int64_t idx) {
VLOG(3) << "Propagating shape change of index " << idx
<< " in : " << comp->name();
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->tuple_index() == idx) {
if (instr->operand(0) != operand) {
continue;
}
bool can_propagate = PropagateGteShapeChange(instr, shape_transformer);
if (!can_propagate) {
VLOG(3) << "Failed to propagate shape change for " << instr->name();
return false;
}
}
}
VLOG(3) << "Finish propagating shape change of index " << idx
<< " in: " << comp->name();
return true;
}
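// Rewrites the while input at `index` into a tuple of per-layer values.
// AllocateBuffer inputs get fresh per-layer buffers; dynamic-slice patterns
// get plain slices when the unfuse predicate allows it; otherwise each layer
// is read through a hoisted clone of the unstacking computation.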
void UnstackWhileInput(const UnstackerTransformer& unstacker,
HloInstruction* while_instr, int64_t index) {
VLOG(3) << "Unstacking while input: " << while_instr->name() << " at "
<< index;
const Shape* new_shape = unstacker.GetUnstackedShape();
HloComputation* unstacking_computation = unstacker.GetUnstackingComputation();
const Shape& slice_shape = new_shape->tuple_shapes(0);
HloInstruction* old_while_input =
while_instr->while_init()->mutable_operand(index);
if (old_while_input->shape().IsTuple()) {
VLOG(3) << "Input is already unstacked: " << old_while_input->name();
return;
}
std::vector<HloInstruction*> slices;
if (old_while_input->IsCustomCall("AllocateBuffer")) {
for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {
slices.push_back(while_instr->AddInstruction(
HloInstruction::CreateCustomCall(slice_shape, {}, "AllocateBuffer")));
}
} else {
for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {
HloInstruction* root_instr = unstacking_computation->root_instruction();
HloInstruction* slice = nullptr;
if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||
unstacker.GetPatternType() == PatternType::NestedDSFusionPattern ||
unstacker.GetPatternType() == PatternType::DSFusionNoBitcastPattern) {
HloInstruction* dynamic_slice = nullptr;
if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||
unstacker.GetPatternType() == PatternType::NestedDSFusionPattern) {
dynamic_slice = root_instr->mutable_operand(0);
} else if (unstacker.GetPatternType() ==
PatternType::DSFusionNoBitcastPattern) {
dynamic_slice = root_instr;
}
std::vector<int64_t> new_start_indices;
new_start_indices.reserve(dynamic_slice->shape().rank());
std::vector<int64_t> new_limit_indices;
new_limit_indices.reserve(dynamic_slice->shape().rank());
std::vector<int64_t> new_strides;
new_strides.reserve(dynamic_slice->shape().rank());
new_start_indices.push_back(i);
new_limit_indices.push_back(i + 1);
new_strides.push_back(1);
for (int64_t j = 1; j < dynamic_slice->shape().rank(); ++j) {
new_start_indices.push_back(0);
new_limit_indices.push_back(
dynamic_slice->mutable_operand(0)->shape().dimensions(j));
new_strides.push_back(1);
}
slice = while_instr->AddInstruction(HloInstruction::CreateSlice(
dynamic_slice->shape(), old_while_input, new_start_indices,
new_limit_indices, new_strides));
}
if (slice == nullptr || !unstacker.GetMetadata().unfuse_slice(slice)) {
std::vector<HloInstruction*> operands = {
old_while_input,
while_instr->AddInstruction(MakeScalarConstantWithShape(
unstacking_computation->parameter_instruction(1)->shape(), i))};
slice = while_instr->AddInstruction(HloInstruction::CreateFusion(
slice_shape, HloInstruction::FusionKind::kLoop, operands,
while_instr->GetModule()->AddEmbeddedComputation(
unstacking_computation->Clone()),
"hoisted"));
}
slices.push_back(slice);
}
}
HloInstruction* new_operand_element =
while_instr->AddInstruction(HloInstruction::CreateTuple(slices));
HloInstruction* new_while_init =
TupleUtil::ReplaceTupleWith(new_operand_element,
while_instr->while_init(), {index}, false)
.value();
CHECK_OK(while_instr->ReplaceOperandWithDifferentShape(0, new_while_init));
}
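// Determines whether operand `index` of `while_instr` can be unstacked by
// propagating the shape change through the loop body, condition, and parent
// computation (recursing into nested loops), and queues the loop-signature
// rewrite on success.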
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
UnstackerTransformer& unstacker, int64_t index) {
VLOG(5) << "ReplaceWhileOperandShape: " << while_instr->name() << " at "
<< index;
bool body_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->while_body(),
while_instr->while_body()->parameter_instruction(0), unstacker, index);
if (!body_changes_collected) {
return false;
}
bool condition_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->while_condition(),
while_instr->while_condition()->parameter_instruction(0), unstacker,
index);
if (!condition_changes_collected) {
return false;
}
bool parent_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->parent(), while_instr, unstacker, index);
if (!parent_changes_collected) {
VLOG(3) << "Failed: parent_changes_collected";
return false;
}
HloInstruction* root_operand =
while_instr->while_body()->root_instruction()->mutable_operand(index);
if (root_operand == nullptr) {
return false;
}
HloInstruction* gte_operand = nullptr;
if (Match(root_operand, match::GetTupleElement(match::Op(>e_operand)))) {
if (Match(gte_operand, match::While())) {
VLOG(3) << "Faced a gte originating from loop: "
<< root_operand->ToString();
bool loop_feeding_root_changes_collected = CanUnstackWhileOperand(
root_operand->operand(0), unstacker, root_operand->tuple_index());
if (!loop_feeding_root_changes_collected) {
VLOG(3) << "Failed: loop " << root_operand->operand(0)->name()
<< " output at " << index << " is not unstackable";
return false;
}
} else if (!Match(gte_operand, match::Parameter().WithParameterNum(0))) {
VLOG(3) << "Failed: root operand of while_body at " << index
<< " is not a parameter";
return false;
}
}
auto loop_change = [=](const UnstackerTransformer& unstacker,
HloInstruction* loop, int64_t idx) mutable {
Shape old_shape = ShapeUtil::MakeStaticShape(
loop->while_body()->parameter_instruction(0)->shape());
ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), idx,
&old_shape);
loop->while_body()->ReplaceParameter(
0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
loop->while_condition()->ReplaceParameter(
0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
CHECK_NE(unstacker.GetUnstackingComputation(), nullptr);
UnstackWhileInput(unstacker, loop, idx);
*loop->mutable_shape() = old_shape;
};
auto loop_change_wrapper = [&loop_change, while_instr,
index](const UnstackerTransformer& unstacker) {
HloInstruction* mutable_loop = const_cast<HloInstruction*>(while_instr);
loop_change(unstacker, mutable_loop, index);
};
unstacker.AddLoopChange(loop_change_wrapper);
return true;
}
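// Entry point for one (loop, index) pair: verifies the operand is
// unstackable, then applies the collected operand, body, and loop changes.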
bool UnstackWhileOperandAtIndex(
const UnstackerMetadata& metadata, HloInstruction* while_instr,
int64_t index, std::vector<const HloInstruction*>& unstacked_instructions) {
UnstackerTransformer unstacker = UnstackerTransformer(metadata);
bool can_unstack = CanUnstackWhileOperand(while_instr, unstacker, index);
if (!can_unstack) {
VLOG(3) << "Unstacking failed for " << while_instr->name() << " at "
<< index;
return false;
}
if (unstacker.GetUnstackedShape() == nullptr) {
VLOG(3) << "Failed: unstacked shape is null";
return false;
}
if (unstacker.GetUnstackingComputation() == nullptr) {
VLOG(3) << "Failed: unstacking computation is null";
return false;
}
for (auto& [instr, indices] : unstacker.GetOperandChanges()) {
switch (instr->opcode()) {
case HloOpcode::kGetTupleElement:
VLOG(3) << "Changing shape of: " << instr->name();
*instr->mutable_shape() = *unstacker.GetUnstackedShape();
break;
case HloOpcode::kTuple: {
for (int64_t index : indices) {
VLOG(3) << "Changing shape of: " << instr->name() << " at " << index;
*instr->mutable_shape()->mutable_tuple_shapes(index) =
*unstacker.GetUnstackedShape();
}
break;
}
case HloOpcode::kWhile:
for (int64_t index : indices) {
VLOG(3) << "Changing shape of: " << instr->name() << " at " << index;
ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), index,
instr->mutable_shape());
}
break;
default:
LOG(FATAL) << "Unsupported opcode: " << instr->name();
}
}
for (const auto& body_change : unstacker.GetBodyChanges()) {
CHECK_OK(body_change());
}
for (auto& loop_change : unstacker.GetLoopChanges()) {
loop_change(unstacker);
}
for (const HloInstruction* instr : unstacker.GetUnstackedInstructions()) {
unstacked_instructions.push_back(instr);
}
return true;
}
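// Builds the tuple shape of `layers` copies of `slice_shape` that replaces
// the stacked operand.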
Shape MakeUnstackedShapeFromSlice(const Shape& slice_shape, int64_t layers) {
std::vector<Shape> shapes;
shapes.reserve(layers);
for (int64_t i = 0; i < layers; ++i) {
shapes.push_back(slice_shape);
}
return ShapeUtil::MakeTupleShape(shapes);
}
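// Returns the loop config if `instr` is a fusion with `num_fusion_params`
// parameters inside an unrollable while body.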
std::optional<WhileLoopConfig> IsFusionInsideUnrollableLoopWithNumParameter(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t num_fusion_params) {
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
if (instr->fused_parameters().size() != num_fusion_params) {
VLOG(3) << "Fusion has different number of parameters";
return std::nullopt;
}
if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
VLOG(5) << "Fusion not inside unrollable while body, " << instr->name()
<< " inside " << instr->parent()->name();
return std::nullopt;
}
return metadata.unrollable_loop_bodies.at(instr->parent());
}
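// Searches the fusion for a dynamic-slice whose (effectively static) dynamic
// index covers the most-major dimension of the stacked operand.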
HloInstruction* GetMostMajorEffectivelyStaticDynamicSliceInFusion(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t num_fusion_params, int64_t stacked_operand_idx) {
std::optional<WhileLoopConfig> while_instr_config =
IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
num_fusion_params);
if (!while_instr_config.has_value()) {
return nullptr;
}
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
std::optional<int64_t> dynamic_index =
MatchEffectivelyStaticDynamicSliceInsideLoop(
fused_instr,
instr->fused_instructions_computation()->parameter_instruction(
stacked_operand_idx),
while_instr_config.value());
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
return fused_instr;
}
}
return nullptr;
}
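// Like the above, but matches any shape-covering dynamically indexed
// instruction of the given opcode (e.g. dynamic-update-slice).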
HloInstruction* GetMostMajorShapeCoveringDynamicIndexInFusion(
const UnstackerMetadata& metadata, const HloInstruction* instr,
HloOpcode opcode, int64_t num_fusion_params, int64_t stacked_operand_idx) {
std::optional<WhileLoopConfig> while_instr_config =
IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
num_fusion_params);
if (!while_instr_config.has_value()) {
return nullptr;
}
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (fused_instr->opcode() != opcode) {
continue;
}
std::optional<int64_t> dynamic_index =
MatchShapeCoveringDynamicIndexInstruction(
fused_instr,
instr->fused_instructions_computation()->parameter_instruction(
stacked_operand_idx),
opcode, while_instr_config.value());
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
return fused_instr;
}
}
return nullptr;
}
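// Matches a fusion of the form bitcast(dynamic-slice(stacked, i, 0, ...)).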
std::optional<PatternInfo> GetDSFusionPattern(const UnstackerMetadata& metadata,
const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusion";
HloInstruction* shape_covering_instr =
GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloInstruction* bitcast_operand = nullptr;
if (Match(instr->fused_instructions_computation()->root_instruction(),
match::Bitcast(match::Op(&bitcast_operand)))) {
if (bitcast_operand == shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::DSFusionPattern;
pattern_info.instr = instr;
const Shape& slice_shape = shape_covering_instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation =
instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
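// Replaces the dynamic-slicing fusion with a "DynamicGte" custom call that
// reads one element of the (now tuple-shaped) stacked operand, bitcast to the
// fusion's original shape.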
absl::Status UnstackDSFusionPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();
HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
new_operand));
return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
bitcast);
}
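// Same as GetDSFusionPattern, but for fusions whose root is the dynamic-slice
// itself with no trailing bitcast.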
std::optional<PatternInfo> GetDSFusionNoBitcastPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusionNoBitcast";
HloInstruction* shape_covering_instr =
GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (instr->fused_instructions_computation()->root_instruction() !=
shape_covering_instr) {
return std::nullopt;
}
PatternInfo pattern_info;
pattern_info.type = PatternType::DSFusionNoBitcastPattern;
pattern_info.instr = instr;
const Shape& slice_shape = shape_covering_instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation = instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
absl::Status UnstackDSFusionNoBitcastPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();
HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
new_operand);
}
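// Matches a fusion whose root is a shape-covering dynamic-update-slice whose
// update is bitcast(parameter); the handler writes the update back through a
// "DynamicTuple" custom call.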
std::optional<PatternInfo> GetDUSFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DUSFusion";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (Match(shape_covering_instr->operand(1),
match::Bitcast(match::Parameter()))) {
if (shape_covering_instr->parent()->root_instruction() ==
shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(
instr->operand(2)->shape(), instr->operand(0)->shape().dimensions(0));
pattern_info.unstacking_computation = nullptr;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDUSFusionPattern(
HloInstruction* mutable_dynamic_update_slicing_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();
HloInstruction* stacked =
mutable_dynamic_update_slicing_fusion->mutable_operand(0);
HloInstruction* offset =
mutable_dynamic_update_slicing_fusion->mutable_operand(1);
HloInstruction* update =
mutable_dynamic_update_slicing_fusion->mutable_operand(2);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
stacked->shape(), {stacked, update, offset}, "DynamicTuple"));
for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
TF_RETURN_IF_ERROR(
mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
user, new_operand));
}
return absl::OkStatus();
}
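// Variant of the DUS pattern where the update is padded inside the fusion;
// the pad becomes the new fusion root and its result is fed to a
// "DynamicTuple" custom call.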
std::optional<PatternInfo> GetDUSFusionWithPadPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DUSFusionWithPad";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (Match(
shape_covering_instr->operand(1),
match::Bitcast(match::Pad(match::Parameter(), match::Constant())))) {
if (shape_covering_instr->parent()->root_instruction() ==
shape_covering_instr) {
const HloInstruction* pad_instr =
shape_covering_instr->operand(1)->operand(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(
pad_instr->shape(),
shape_covering_instr->operand(0)->shape().dimensions(0));
pattern_info.unstacking_computation = nullptr;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDUSFusionWithPadPattern(
HloInstruction* mutable_dynamic_update_slicing_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();
HloComputation* fused_computation =
mutable_dynamic_update_slicing_fusion->fused_instructions_computation();
HloInstruction* stacked =
mutable_dynamic_update_slicing_fusion->mutable_operand(
fused_computation->root_instruction()
->mutable_operand(0)
->parameter_number());
HloInstruction* offset =
mutable_dynamic_update_slicing_fusion->mutable_operand(
fused_computation->root_instruction()
->mutable_operand(2)
->parameter_number());
HloInstruction* pad_instr = fused_computation->root_instruction()
->mutable_operand(1)
->mutable_operand(0);
fused_computation->set_root_instruction(pad_instr, true);
*mutable_dynamic_update_slicing_fusion->mutable_shape() = pad_instr->shape();
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
stacked->shape(),
{stacked, mutable_dynamic_update_slicing_fusion, offset},
"DynamicTuple"));
for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
if (user != new_operand) {
TF_RETURN_IF_ERROR(
mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
user, new_operand));
}
}
return absl::OkStatus();
}
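// Matches reduce(add(dynamic-slice(...), broadcast(constant))). A dedicated
// unstacking computation is built that slices and reduces one layer; the add
// is re-applied at the use site.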
std::optional<PatternInfo> GetDSFusionWithAddPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusionWithAdd";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloComputation* fused_computation = instr->fused_instructions_computation();
HloInstruction* fusion_root = fused_computation->root_instruction();
HloInstruction* add_operand;
if (Match(fusion_root,
match::Reduce(match::Add(match::Op(&add_operand),
match::Broadcast(match::Constant())),
match::Constant()))) {
if (add_operand == shape_covering_instr) {
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(instr->shape(), num_layers);
HloComputation::Builder builder("unstack_add");
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, fused_computation->parameter_instruction(0)->shape(), "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, fused_computation->parameter_instruction(1)->shape(), "p1"));
HloInstruction* zero =
builder.AddInstruction(MakeScalarConstantWithShape(p1->shape(), 0));
std::vector<HloInstruction*> slice_starts;
slice_starts.reserve(shape_covering_instr->shape().rank());
slice_starts.push_back(p1);
for (int64_t i = 0; i < shape_covering_instr->shape().rank() - 1; i++) {
slice_starts.push_back(zero);
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
shape_covering_instr->shape(), p0, slice_starts,
shape_covering_instr->dynamic_slice_sizes()));
HloInstruction* zero_reduce =
builder.AddInstruction(MakeScalarConstantWithShape(
ShapeUtil::MakeScalarShape(slice->shape().element_type()), 0));
HloInstruction* reduce =
builder.AddInstruction(HloInstruction::CreateReduce(
instr->shape(), slice, zero_reduce, fusion_root->dimensions(),
fused_computation->root_instruction()->to_apply()));
HloComputation* unstack_add =
instr->GetModule()->AddEmbeddedComputation(builder.Build());
unstack_add->set_root_instruction(reduce);
pattern_info.unstacking_computation = unstack_add;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDSFusionWithAddPattern(
HloInstruction* mutable_dynamic_slice_with_add_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slice_with_add_fusion->parent();
HloInstruction* stacked =
mutable_dynamic_slice_with_add_fusion->mutable_operand(0);
HloInstruction* offset =
mutable_dynamic_slice_with_add_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
HloInstruction* one = parent_loop->AddInstruction(MakeScalarConstantWithShape(
ShapeUtil::MakeScalarShape(slice_shape.element_type()), 1));
HloInstruction* broadcast = parent_loop->AddInstruction(
HloInstruction::CreateBroadcast(slice_shape, one, {}));
HloInstruction* add = mutable_dynamic_slice_with_add_fusion->AddInstruction(
HloInstruction::CreateBinary(new_operand->shape(), HloOpcode::kAdd,
new_operand, broadcast));
TF_RETURN_IF_ERROR(
mutable_dynamic_slice_with_add_fusion->ReplaceAllUsesWith(add));
return absl::OkStatus();
}
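// Matches a fusion whose stacked parameter has a single user: an inner fusion
// that performs the shape-covering dynamic-slice.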
std::optional<PatternInfo> GetNestedDSFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
VLOG(5) << "Instruction not inside unrollable while body, " << instr->name()
<< " inside " << instr->parent()->name();
return std::nullopt;
}
WhileLoopConfig while_instr_config =
metadata.unrollable_loop_bodies.at(instr->parent());
VLOG(3) << "Checking NestedDSFusionPattern";
HloInstruction* inner_fusion_user = nullptr;
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (Match(fused_instr, match::Parameter(stacked_operand_idx))) {
if (fused_instr->user_count() != 1) {
return std::nullopt;
}
if (Match(fused_instr->users()[0],
match::Fusion(match::Op(), match::Op()))) {
inner_fusion_user = fused_instr->users()[0];
break;
}
}
}
if (inner_fusion_user == nullptr) {
return std::nullopt;
}
for (HloInstruction* inner_fusion_instr :
inner_fusion_user->fused_instructions_computation()
->MakeInstructionPostOrder()) {
if (!Match(inner_fusion_instr, match::DynamicSlice())) {
continue;
}
std::optional<int64_t> dynamic_index =
MatchEffectivelyStaticDynamicSliceInsideLoop(
inner_fusion_instr,
inner_fusion_user->fused_instructions_computation()
->parameter_instruction(0),
while_instr_config);
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
const int64_t num_layers =
inner_fusion_user->operand(0)->shape().dimensions(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::NestedDSFusionPattern;
pattern_info.instr = inner_fusion_user;
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(inner_fusion_instr->shape(), num_layers);
pattern_info.unstacking_computation =
inner_fusion_user->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(inner_fusion_user);
return pattern_info;
}
}
return std::nullopt;
}
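// Hoists the slice out of the nested fusion: the stacked parameter of the
// outer fusion is replaced by a sliced parameter fed by a "DynamicGte" custom
// call, and the inner dynamic-slicing fusion is replaced by a bitcast fusion.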
absl::Status UnstackNestedDSFusionPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloInstruction* parent_fusion =
mutable_dynamic_slicing_fusion->parent()->FusionInstruction();
HloInstruction* stacked_in_ds_fusion =
mutable_dynamic_slicing_fusion->mutable_operand(0);
CHECK_EQ(stacked_in_ds_fusion->opcode(), HloOpcode::kParameter);
int64_t stacked_param_number = stacked_in_ds_fusion->parameter_number();
HloInstruction* stacked =
parent_fusion->mutable_operand(stacked_param_number);
HloInstruction* offset_in_ds_fusion =
mutable_dynamic_slicing_fusion->mutable_operand(1);
CHECK_EQ(offset_in_ds_fusion->opcode(), HloOpcode::kParameter);
HloInstruction* offset =
parent_fusion->mutable_operand(offset_in_ds_fusion->parameter_number());
HloInstruction* sliced_param =
parent_fusion->fused_instructions_computation()->ReplaceParameter(
stacked_param_number,
HloInstruction::CreateParameter(stacked_param_number, slice_shape,
"sliced"));
HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
sliced_param));
HloInstruction* bitcast_fusion =
mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateFusion(mutable_dynamic_slicing_fusion->shape(),
HloInstruction::FusionKind::kLoop,
bitcast));
TF_RETURN_IF_ERROR(
mutable_dynamic_slicing_fusion->ReplaceAllUsesWith(bitcast_fusion));
HloInstruction* new_operand =
parent_fusion->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return parent_fusion->ReplaceOperandWithDifferentShape(
sliced_param->parameter_number(), new_operand);
}
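// Matches a stacked operand with exactly two users: a dynamic-slicing fusion
// (bitcast of a shape-covering dynamic-slice) and a dynamic-update-slice
// fusion; both are rewritten together.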
std::optional<PatternInfo> GetDSAndDUSPattern(const UnstackerMetadata& metadata,
const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSAndDUSPattern";
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
const HloInstruction* stacked = instr->operand(stacked_operand_idx);
if (stacked->user_count() != 2) {
return std::nullopt;
}
HloInstruction* shape_covering_ds_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_ds_instr == nullptr) {
return std::nullopt;
}
HloInstruction* bitcast_operand = nullptr;
if (!Match(instr->fused_instructions_computation()->root_instruction(),
match::Bitcast(match::Op(&bitcast_operand)))) {
return std::nullopt;
}
if (bitcast_operand != shape_covering_ds_instr) {
return std::nullopt;
}
if (!GetDUSFusionPattern(metadata, stacked->users()[1],
stacked->users()[1]->operand_index(stacked))) {
return std::nullopt;
}
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
const Shape& slice_shape = instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation = instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
pattern_info.unstacked_instrs.push_back(stacked->users()[1]);
return pattern_info;
}
absl::Status UnstackDSAndDUSPattern(HloInstruction* mutable_dynamic_slice,
const Shape& slice_shape) {
HloInstruction* stacked_gte = mutable_dynamic_slice->mutable_operand(0);
int64_t stacked_gte_index = stacked_gte->tuple_index();
HloComputation* parent = stacked_gte->parent();
ShapeUtil::UpdateTupleShape(stacked_gte->shape(), stacked_gte_index,
parent->root_instruction()->mutable_shape());
HloComputation* parent_loop = mutable_dynamic_slice->parent();
HloInstruction* stacked = mutable_dynamic_slice->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slice->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
TF_RETURN_IF_ERROR(
mutable_dynamic_slice->ReplaceAllUsesWithDifferentShape(new_operand));
HloInstruction* mutable_dynamic_update_slice = stacked_gte->users()[1];
TF_RETURN_IF_ERROR(
UnstackDUSFusionPattern(mutable_dynamic_update_slice, slice_shape));
return absl::OkStatus();
}
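// Matches reduce(dynamic-slice(...), constant) whose reducer is a plain add.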
std::optional<PatternInfo> GetReduceFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking ReduceFusion";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloInstruction* reduce_operand = nullptr;
HloInstruction* fusion_root =
instr->fused_instructions_computation()->root_instruction();
if (Match(fusion_root, match::Reduce(match::Op(&reduce_operand),
match::ConstantScalar())) &&
Match(fusion_root->to_apply()->root_instruction(),
match::Add(match::Parameter(), match::Parameter()))) {
if (reduce_operand == shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
const Shape& slice_shape = instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation =
instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackReduceFusionPattern(HloInstruction* mutable_reduce_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_reduce_fusion->parent();
HloInstruction* stacked = mutable_reduce_fusion->mutable_operand(0);
HloInstruction* offset = mutable_reduce_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return mutable_reduce_fusion->ReplaceAllUsesWithDifferentShape(new_operand);
}
}  // namespace
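// Registers the supported (pattern, handler) pairs, attempts to unstack every
// non-tuple operand of each entry-computation while loop, and fully unrolls
// every loop whose operands were unstacked.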
absl::StatusOr<bool> HloUnstacker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(auto metadata,
UnstackerMetadata::Create(module, unfuse_slice_));
metadata.custom_handlers.push_back(
std::make_pair(GetDSAndDUSPattern, UnstackDSAndDUSPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDSFusionPattern, UnstackDSFusionPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDUSFusionPattern, UnstackDUSFusionPattern));
metadata.custom_handlers.push_back(std::make_pair(
GetDUSFusionWithPadPattern, UnstackDUSFusionWithPadPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDSFusionWithAddPattern, UnstackDSFusionWithAddPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetReduceFusionPattern, UnstackReduceFusionPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetNestedDSFusionPattern, UnstackNestedDSFusionPattern));
metadata.custom_handlers.push_back(std::make_pair(
GetDSFusionNoBitcastPattern, UnstackDSFusionNoBitcastPattern));
std::vector<HloInstruction*> entry_loops;
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (Match(instr, match::While(match::Tuple())) &&
Match(instr->while_body()->root_instruction(), match::Tuple())) {
entry_loops.push_back(instr);
}
}
bool unstacked = false;
std::vector<const HloInstruction*> unstacked_instructions;
for (HloInstruction* loop : entry_loops) {
for (int64_t i = 0; i < loop->shape().tuple_shapes_size(); ++i) {
if (loop->while_init()->operand(i)->shape().IsTuple()) {
continue;
}
VLOG(3) << "Attempting to unstack " << loop->name() << " at " << i
<< " = " << loop->while_init()->operand(i)->shape().ToString(true)
<< loop->while_init()->operand(i)->ToShortString();
unstacked |=
UnstackWhileOperandAtIndex(metadata, loop, i, unstacked_instructions);
VLOG(3) << "###################";
}
}
if (!unstacked) {
return false;
}
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
std::vector<HloInstruction*> loops_to_unroll;
for (const HloInstruction* instr : unstacked_instructions) {
HloInstruction* loop = metadata.bodies[instr->parent()];
if (std::find(loops_to_unroll.begin(), loops_to_unroll.end(), loop) ==
loops_to_unroll.end()) {
loops_to_unroll.push_back(loop);
}
}
for (int64_t i = loops_to_unroll.size() - 1; i >= 0; --i) {
HloInstruction* loop = loops_to_unroll[i];
    TF_ASSIGN_OR_RETURN(
        UnrollResult unroll_result,
        WhileLoopUnroller::UnrollAndReturnReplacement(
            loop, /*unroll_factor=*/-1, /*wrap_in_trivial_loop=*/false,
            /*force_unroll=*/true, /*prepare=*/false));
bool unrolled = unroll_result.unrolled;
CHECK(unrolled);
}
VLOG(3) << "after unstacking \n" << module->ToString();
return true;
}
} | #include "xla/service/hlo_unstacker.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using UnstackerTest = HloTestBase;
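// Returns the number of instructions with `opcode` in the module's entry
// computation.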
int64_t GetInstrCountWithOpcodeInEntry(HloModule* module, HloOpcode opcode) {
int64_t instr_with_opcode_count = 0;
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == opcode) {
instr_with_opcode_count++;
}
}
return instr_with_opcode_count;
}
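// After unstacking, the hoisted per-layer reads show up as plain slices in
// the entry computation and the loop is fully unrolled, leaving no fusions.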
TEST_F(UnstackerTest, UnstackDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt,
                                      /*run_hlo_passes=*/false));
}
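// The stacked operand also feeds a tuple-producing fusion that no pattern
// covers, so unstacking must bail out.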
TEST_F(UnstackerTest, NotUnstackDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.tuple {
%param_0.51117 = s8[3,128,128] parameter(0)
mult = multiply(param_0.51117, param_0.51117)
ROOT out = tuple(param_0.51117, mult)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
fusion_mult = (s8[3,128,128], s8[3,128,128]) fusion(s8[3,128,128] p1), kind=kLoop, calls=%fused_computation.tuple
mult = s8[3,128,128] get-tuple-element(fusion_mult), index=1
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, mult)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_FALSE(unstacked);
}
TEST_F(UnstackerTest, UnstackReduceFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
dynamic-slice.609.reduce_sub_computation {
lhs.53 = s8[] parameter(0)
rhs.53 = s8[] parameter(1)
ROOT add.3090 = s8[] add(lhs.53, rhs.53)
}
fused_computation.1096.clone {
param_0.5572 = s8[3,128,128] parameter(0)
param_1.6711 = s32[]{:T(128)} parameter(1)
constant.12008 = s32[]{:T(128)} constant(0)
dynamic-slice.1545 = s8[1,128,128] dynamic-slice(param_0.5572, param_1.6711, constant.12008, constant.12008), dynamic_slice_sizes={1,128, 128}
constant.12009 = s8[] constant(-0)
ROOT reduce.919 = s8[128,128] reduce(dynamic-slice.1545, constant.12009), dimensions={0}, to_apply=dynamic-slice.609.reduce_sub_computation
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.1096.clone
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt,
                                      /*run_hlo_passes=*/false));
}
TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcast) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt,
                                      /*run_hlo_passes=*/false));
}
TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcastKeepFused) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
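// Hypothetical reader note: an unfuse callback that always declines, so the
// pass keeps each unstacked slice wrapped in its fusion instead of
// rewriting it to a bare slice (hence 0 kSlice / 3 kFusion below).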
auto unfuse = [](HloInstruction* instruction) { return false; };
TF_ASSERT_OK_AND_ASSIGN(bool unstacked,
HloUnstacker(unfuse).Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 0);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackDSFusionPatternWithDifferentLayout) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.30.clone (param_0.153: bf16[32,4,64,64,3], param_1.123: s32[]) -> bf16[64,4,64,3] {
%param_0.153 = bf16[32,4,64,64,3]{2,1,4,3,0} parameter(0)
%param_1.123 = s32[]{:T(128)} parameter(1)
%constant.227 = s32[]{:T(128)} constant(0)
%dynamic-slice.5 = bf16[1,4,64,64,3]{2,1,4,3,0} dynamic-slice(bf16[32,4,64,64,3]{2,1,4,3,0} %param_0.153, s32[]{:T(128)} %param_1.123, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227), dynamic_slice_sizes={1,4,64,64,3}
ROOT %bitcast.102 = bf16[64,4,64,3]{0,1,3,2} bitcast(bf16[1,4,64,64,3]{2,1,4,3,0} %dynamic-slice.5)
}
%while.body (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> (s32[], bf16[8,128], bf16[32,4,64,64,3]) {
wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = bf16[32,4,64,64,3]{2,1,4,3,0} get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = bf16[64,4,64,3]{0,1,3,2} fusion(p1, i), kind=kLoop, calls=%fused_computation.30.clone
ROOT out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(inc, p0, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> pred[] {
wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(32)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = bf16[32,4,64,64,3] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) while(while.input), condition=%while.cond , body=%while.body
while_use = bf16[32,4,64,64,3] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice),
32);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDynamicIndex) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[6,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[6,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[6,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> (s32[], bf16[8,128], s8[6,128,128]) {
wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[6,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
two = s32[] constant(2)
mult = s32[] multiply(i, two)
fusion.conv = bf16[8,128] fusion(p0, p1, mult), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[6,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[6,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[6,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[6,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[6,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithMultipleIndex) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[4,128,128] get-tuple-element(wide_p), index=2
p2 = s8[4,128,128] get-tuple-element(wide_p), index=3
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv.1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
fusion.conv.2 = bf16[8,128] fusion(p0, p2, i), kind=kOutput, calls=%fused_computation.inner.2
plus = bf16[8,128] add(fusion.conv.1, fusion.conv.2)
ROOT out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(inc, plus, p1, p2)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[4,128,128] parameter(0)
p1 = s8[4,128,128] parameter(1)
p2 = bf16[8,128] parameter(2)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(init, p2, p0, p1)
while.out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDifferentOperandsOrder) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_1.30691: s8[3,128,128], p2: s32[], param_0.34523: bf16[8,128]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(2)
%param_1.30691 = s8[3,128,128] parameter(0)
p2 = s32[] parameter(1)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p1, i, p0), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithSameUnstackingComps) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
fusion.conv2 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.2
add = bf16[8,128] add(fusion.conv1, fusion.conv2)
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, add, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, NotUnstackNestedDSFusionPatternWithSameUnstackingComps) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67831 = s8[128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.2
%fusion.67830 = s8[1,128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.1
%bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, p0, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
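// The two slicing fusions disagree on output shape (s8[1,128,128] vs.
// s8[128,128]), so no single unstacked representation satisfies both users
// and the pass must leave the module unchanged.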
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_FALSE(unstacked);
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternSingleNestedLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner, body=%while.body.inner
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
ENTRY main {
weight = s8[4,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(1)
while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 4);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternTwoNestedLoops) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in.1 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out.1 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.1), condition=%while.cond.inner1, body=%while.body.inner1
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.1), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
%fused_computation.slice2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner2
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in.2 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out.2 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.2), condition=%while.cond.inner2, body=%while.body.inner2
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.2), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
ENTRY main {
weight = s8[4,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(1)
while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond1 , body=%while.body1
init2 = s32[] get-tuple-element(while.out), index=0
second.while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init2, p1, weight)
second.while.out = (s32[], bf16[8,128], s8[4,128,128]) while(second.while.input), condition=%while.cond2 , body=%while.body2
out = bf16[8,128] get-tuple-element(while.out), index=1
second.out = bf16[8,128] get-tuple-element(second.while.out), index=1
ROOT result = bf16[8,128] add(out, second.out)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackDSAndDUSPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s32[4,3], offset: s32[]) -> s32[3] {
%param_0.51117 = s32[4,3] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = s32[1,3] dynamic-slice(s32[4,3] %param_0.51117, offset, zero), dynamic_slice_sizes={1,3}
ROOT %bitcast.31250 = s32[3] bitcast(s32[1,3] %dynamic-slice.22040)
}
%fused_computation.update.slice (param_0.51117: s32[4,3], p1: s32[], p2: s32[3]) -> s32[4,3] {
%param_0.51117 = s32[4,3] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[3] parameter(2)
%zero = s32[] constant(0)
%bitcast.31250 = s32[1,3] bitcast(%p2)
ROOT output_dus = s32[4,3]{1,0} dynamic-update-slice(%param_0.51117, %bitcast.31250, %p1, zero)
}
SimpleLoop.body {
loop_var.1 = (s32[], s32[4,3]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[4,3] get-tuple-element(loop_var.1), index=1
zero = s32[] constant(0)
some_const = s32[3] constant({0,1,2})
constant.1 = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, constant.1)
ds = s32[3]{0} fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
update = s32[3] add(ds, ds)
dus = s32[3] dynamic-update-slice(ds, update, zero)
output = s32[4,3] fusion(get-tuple-element.2, get-tuple-element.1, dus), kind=kLoop, calls=%fused_computation.update.slice
ROOT tuple = (s32[], s32[4,3]) tuple(idx, output)
}
SimpleLoop.condition {
loop_var.1 = (s32[], s32[4,3]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
reference = s32[4,3] parameter(0)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = (s32[], s32[4,3]) tuple(zero, reference)
while = (s32[], s32[4,3]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body
ROOT out = s32[] get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackDSAndDUSPatternNestedLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
inner.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2
sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
sliced.2 = bf16[1,8,257,128] fusion(get-tuple-element.3, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice.2
temp = bf16[1,8,257,128] add(sliced, sliced.2)
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, get-tuple-element.2, get-tuple-element.3)
}
inner.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
outer.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2
zero = s32[] constant(0)
buffer = bf16[4,1,8,257,128] custom-call(), custom_call_target="AllocateBuffer"
inner.input = tuple(zero, buffer, get-tuple-element.2)
inner = while(inner.input), condition=inner.condition, body=inner.body
out1 = bf16[4,1,8,257,128] get-tuple-element(inner), index=1
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT tuple = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) tuple(idx, out1, get-tuple-element.3)
}
outer.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
mul = s32[] multiply(get-tuple-element.1, constant.2)
ROOT less-than = pred[] compare(get-tuple-element.1, mul), direction=LT
}
ENTRY SimpleLoop {
param1 = bf16[4,1,8,257,128] parameter(0)
param2 = bf16[4,1,8,257,128] parameter(1)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = tuple(zero, param1, param2)
while = while(tuple.1), condition=outer.condition, body=outer.body
ROOT out = s32[] get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
TEST_F(UnstackerTest, UnstackDSAndDUSPatternLoopFeedingLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.update.slice (param_0.51117: bf16[4,1,8,257,128], p1: s32[], param_0.51118: bf16[1,8,257,128]) -> bf16[4,1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
p1 = s32[] parameter(1)
%param_0.51118 = bf16[1,8,257,128] parameter(2)
bitcast = bf16[1,1,8,257,128] bitcast(param_0.51118)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-update-slice.22040 = bf16[4,1,8,257,128] dynamic-update-slice(bf16[4,1,8,257,128] %param_0.51117, bitcast, p1, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694)
}
%fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
first.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
constant = bf16[1,8,257,128] constant({...})
sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
tmp = bf16[1,8,257,128] add(sliced, sliced)
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, get-tuple-element.2)
}
first.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
next.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
constant = bf16[1,8,257,128] constant({...})
update.sliced = bf16[4,1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1, constant), kind=kLoop, calls=%fused_computation.update.slice
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, update.sliced)
}
next.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
param1 = bf16[4,1,8,257,128] parameter(0)
param2 = bf16[4,1,8,257,128] parameter(1)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = tuple(zero, param1)
while = while(tuple.1), condition=first.condition, body=first.body
while.out = bf16[4,1,8,257,128] get-tuple-element(while), index=1
next.input = tuple(zero, while.out)
next = while(next.input), condition=next.condition, body=next.body
ROOT out = s32[] get-tuple-element(next), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
}
TEST_F(UnstackerTest, UnstackDUSFusionWithPadPatternLoopFeedingLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
fused_computation.75.clone {
param_0.5713 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
param_2.4396 = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} parameter(2)
constant.12166 = bf16[]{:T(256)} constant(0)
pad.496 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} pad(param_2.4396, constant.12166), padding=0_0x0_0x0_256x0_0
bitcast.1262 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} bitcast(pad.496)
param_1.6823 = s32[]{:T(128)} parameter(1)
constant.12165 = s32[]{:T(128)} constant(0)
ROOT dynamic-update-slice.193 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(param_0.5713, bitcast.1262, param_1.6823, constant.12165, constant.12165, constant.12165, constant.12165)
}
fused_computation.1 {
param_0.5712 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
param_1.6822 = s32[]{:T(128)} parameter(1)
constant.12164 = s32[]{:T(128)} constant(0)
dynamic-slice.1597 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-slice(param_0.5712, param_1.6822, constant.12164, constant.12164, constant.12164, constant.12164), dynamic_slice_sizes={1,1,8,513,128}
ROOT bitcast.1261 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} bitcast(dynamic-slice.1597)
}
first.body {
wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
fusion.2381 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.1
tmp = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} add(fusion.2381, fusion.2381)
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)
}
first.cond {
wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
wide.region_54.2652.clone_spmd {
wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
update = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} constant({...})
fusion.2382 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177, update), kind=kLoop, calls=fused_computation.75.clone
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, fusion.2382)
}
wide.region_55.2732.clone_spmd {
wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
ENTRY main {
p0 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
init = s32[]{:T(128)} constant(0)
first.input = tuple(init, p0)
first.out = while(first.input), condition=first.cond , body=first.body
o1 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(first.out), index=1
input = tuple(init, o1)
out = while(input), condition=wide.region_55.2732.clone_spmd , body=wide.region_54.2652.clone_spmd
ROOT res = s32[]{:T(128)} get-tuple-element(out), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
}
TEST_F(UnstackerTest, UnstackDUSFusionWithAddPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
add.2771.reduce_sub_computation {
lhs.44 = bf16[] parameter(0)
rhs.44 = bf16[] parameter(1)
ROOT add.3079 = bf16[] add(lhs.44, rhs.44)
}
fused_computation.75.clone {
param_0.31658 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)
param_1.26202 = s32[]{:T(128)} parameter(1)
constant.47557 = s32[]{:T(128)} constant(0)
dynamic-slice.12289 = bf16[1,4096]{1,0:T(2,128)(2,1)} dynamic-slice(param_0.31658, param_1.26202, constant.47557), dynamic_slice_sizes={1,4096}
constant.47559 = bf16[]{:T(256)} constant(1)
broadcast.39214 = bf16[1,4096]{1,0:T(2,128)(2,1)} broadcast(constant.47559), dimensions={}
add.13176 = bf16[1,4096]{1,0:T(2,128)(2,1)} add(dynamic-slice.12289, broadcast.39214)
constant.47558 = bf16[] constant(-0)
ROOT reduce.8210 = bf16[4096]{0:T(1024)(128)(2,1)} reduce(add.13176, constant.47558), dimensions={0}, to_apply=add.2771.reduce_sub_computation
}
first.body {
wide.param.29 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,4096]{1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
fusion.2381 = bf16[4096]{0:T(1024)(128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.75.clone
tmp = bf16[4096]{0:T(1024)(128)(2,1)} add(fusion.2381, fusion.2381)
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)
}
first.cond {
wide.param.28 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
ENTRY main {
p0 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)
init = s32[]{:T(128)} constant(0)
first.input = tuple(init, p0)
first.out = while(first.input), condition=first.cond , body=first.body
ROOT o1 = s32[]{:T(128)} get-tuple-element(first.out), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9bda38a9-493d-4316-85e9-9599ccb99c9a | cpp | tensorflow/tensorflow | dynamic_padder | third_party/xla/xla/service/dynamic_padder.cc | third_party/xla/xla/service/dynamic_padder_test.cc | #include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
auto* dynamic_padding_gauge = tsl::monitoring::Gauge<bool, 0>::New(
"/tensorflow/core/use_dynamic_padding_gauge",
"Tracks if dynamic padder is used.");
absl::StatusOr<HloInstruction*> ChooseIdentityValue(HloInstruction* inst,
int64_t operand_number) {
if (inst->IsElementwise()) {
return nullptr;
}
if (inst->opcode() == HloOpcode::kSelectAndScatter ||
inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
if (operand_number == 1) {
return inst->mutable_operand(2);
}
TF_RET_CHECK(operand_number == 0);
HloComputation* select = inst->called_computations()[0];
if (Match(select->root_instruction(),
match::Compare(match::Parameter(), match::Parameter())
.WithComparisonDirection(ComparisonDirection::kGe))) {
return inst->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::MinValue(inst->operand(0)->shape().element_type())));
} else {
return Unimplemented(
"Only select and scatter with `max` as select function is "
"supported, got %s",
select->ToString());
}
}
switch (inst->opcode()) {
case HloOpcode::kReduce: {
auto* reduce = Cast<HloReduceInstruction>(inst);
TF_RET_CHECK(operand_number < reduce->input_count())
<< "Only data operand with dynamic dimension is valid.";
int64_t init_value_index = reduce->input_count() + operand_number;
return inst->mutable_operand(init_value_index);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(inst);
TF_RET_CHECK(operand_number < reduce_window->input_count())
<< "Only data operand with dynamic dimension is valid.";
int64_t init_value_index = reduce_window->input_count() + operand_number;
return inst->mutable_operand(init_value_index);
}
case HloOpcode::kConvolution:
case HloOpcode::kDot: {
PrimitiveType ptype = inst->operand(0)->shape().element_type();
return inst->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));
}
case HloOpcode::kPad:
return inst->mutable_operand(1);
case HloOpcode::kScatter: {
if (operand_number != 1) {
return nullptr;
}
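// Padding the scatter indices with the largest representable value makes
// the padded rows index out of bounds, so their updates are dropped by
// scatter's out-of-bounds semantics.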
PrimitiveType indices_ptype =
inst->operand(operand_number)->shape().element_type();
return inst->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(indices_ptype)));
}
case HloOpcode::kParameter:
case HloOpcode::kGather:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kTuple:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kBroadcast:
case HloOpcode::kTranspose:
case HloOpcode::kSort:
case HloOpcode::kSlice:
case HloOpcode::kDomain:
return nullptr;
case HloOpcode::kCustomCall:
return nullptr;
default:
return UnimplementedStrCat("Unimplemented padding for instruction: ",
inst->ToString());
}
}
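// Rewrites kGetDimensionSize: if dynamic dimension inference tracks a size
// for the queried dimension, forward that size to all users; otherwise
// replace the instruction with a constant holding the static size.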
absl::StatusOr<bool> ReplaceGetSize(
HloInstruction* instr,
DynamicDimensionInference* dynamic_dimension_inference) {
if (instr->opcode() != HloOpcode::kGetDimensionSize) {
return false;
}
HloComputation* computation = instr->parent();
TF_ASSIGN_OR_RETURN(auto legal_shape,
ShapeInference::InferGetDimensionSizeShape(
instr->operand(0)->shape(), instr->dimension()));
TF_RET_CHECK(ShapeUtil::Equal(instr->shape(), legal_shape))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "legal_shape " << legal_shape.ToString();
TF_RET_CHECK(ShapeUtil::HasPrimitiveType(instr->shape(), S32));
HloInstruction* operand = instr->mutable_operand(0);
int64_t dim = instr->dimension();
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, dim);
if (dynamic_size != nullptr) {
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(dynamic_size));
dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(
instr, dynamic_size);
} else {
int32_t size = instr->operand(0)->shape().dimensions(dim);
HloInstruction* new_instr = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size)));
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(new_instr));
dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(instr,
new_instr);
}
return true;
}
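// kSetDimensionSize is purely an annotation once padding is materialized;
// forward its data operand to all users.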
absl::StatusOr<bool> ReplaceSetSize(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kSetDimensionSize) {
return false;
}
TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
instr->shape(), instr->operand(0)->shape()))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "instruction operand shape " << instr->operand(0)->shape();
HloInstruction* operand = instr->mutable_operand(0);
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));
return true;
}
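// Removes a "SetBound" custom call the same way: the bound only carries shape
// metadata, so the data operand can be forwarded directly.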
absl::StatusOr<bool> ReplaceSetBound(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kCustomCall ||
instr->custom_call_target() != "SetBound") {
return false;
}
TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
instr->shape(), instr->operand(0)->shape()))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "instruction operand shape " << instr->operand(0)->shape();
HloInstruction* operand = instr->mutable_operand(0);
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));
return true;
}
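// Returns true if padding the given operand dimension of `inst` is
// unnecessary, e.g. because the dimension is a batch dimension, is not
// contracted or reduced, or is covered by a trivial (size-1) window.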
bool ShouldSkipPadOnOperand(
const HloInstruction* inst, int64_t operand_num, int64_t dimension,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
switch (inst->opcode()) {
case HloOpcode::kConvolution: {
if (operand_num == 0) {
if (dimension ==
inst->convolution_dimension_numbers().input_batch_dimension()) {
return true;
}
const auto& spatial_dims =
inst->convolution_dimension_numbers().input_spatial_dimensions();
for (int64_t spatial_dim = 0; spatial_dim < spatial_dims.size();
++spatial_dim) {
if (spatial_dims[spatial_dim] == dimension &&
inst->window().dimensions(spatial_dim).size() == 1) {
return true;
}
}
}
return operand_num == 1 &&
(dimension == inst->convolution_dimension_numbers()
.kernel_output_feature_dimension());
}
case HloOpcode::kDot: {
if (operand_num == 0) {
return !absl::c_linear_search(
inst->dot_dimension_numbers().lhs_contracting_dimensions(),
dimension);
}
return !absl::c_linear_search(
inst->dot_dimension_numbers().rhs_contracting_dimensions(),
dimension);
}
case HloOpcode::kReduce:
return !absl::c_linear_search(inst->dimensions(), dimension);
case HloOpcode::kSelectAndScatter:
case HloOpcode::kReduceWindow:
return inst->window().dimensions(dimension).size() == 1;
case HloOpcode::kAsyncStart:
if (!HloInstruction::IsThreadIncluded(inst->async_execution_thread(),
execution_threads)) {
return true;
}
return false;
default:
return false;
}
}
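// Masks out the region of `inst` beyond `dynamic_size` along dimension `dim`
// by selecting `padding_scalar` wherever an iota along `dim` reaches or
// exceeds the dynamic size.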
HloInstruction* PadWithScalar(HloInstruction* inst, int64_t dim,
HloInstruction* dynamic_size,
HloInstruction* padding_scalar) {
CHECK(inst != nullptr && dynamic_size != nullptr &&
padding_scalar != nullptr);
const Shape mask_shape =
ShapeUtil::MakeShape(xla::S32, inst->shape().dimensions());
const Shape pred_shape =
ShapeUtil::MakeShape(xla::PRED, inst->shape().dimensions());
HloInstruction* iota =
inst->AddInstruction(HloInstruction::CreateIota(mask_shape, dim));
HloInstruction* broadcasted_effective_size = inst->AddInstruction(
HloInstruction::CreateBroadcast(mask_shape, dynamic_size, {}));
HloInstruction* pred = inst->AddInstruction(HloInstruction::CreateCompare(
pred_shape, iota, broadcasted_effective_size, ComparisonDirection::kLt));
HloInstruction* broadcasted_identity_value =
inst->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(inst->shape()), padding_scalar, {}));
HloInstruction* padded = inst->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeStaticShape(inst->shape()), HloOpcode::kSelect, pred, inst,
broadcasted_identity_value));
return padded;
}
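// Builds a one-dimensional 0/1 mask along the single dimension of a reshape
// group that marks which elements stay in bounds given the dynamic sizes of
// the dimensions on the other side of the reshape. Returns nullptr when none
// of those dimensions is dynamic, i.e. no rewrite is needed.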
HloInstruction* GenerateBinaryMask(
HloInstruction* reshape, int64_t input_dim,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> output_dynamic_dims, HloInstruction* one,
HloInstruction* zero, bool split_input) {
Shape input_shape =
split_input ? reshape->operand(0)->shape() : reshape->shape();
Shape output_shape =
split_input ? reshape->shape() : reshape->operand(0)->shape();
const Shape mask_input_shape =
ShapeUtil::MakeShape(xla::S32, {input_shape.dimensions(input_dim)});
const Shape pred_input_shape =
ShapeUtil::MakeShape(xla::PRED, {input_shape.dimensions(input_dim)});
HloInstruction* pred_true = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* input_shape_pred_mask = reshape->AddInstruction(
HloInstruction::CreateBroadcast(pred_input_shape, pred_true, {}));
bool need_rewrite = false;
HloInstruction* iota =
reshape->AddInstruction(HloInstruction::CreateIota(mask_input_shape, 0));
for (int64_t i = 1; i < output_dims.size(); ++i) {
if (output_dynamic_dims[output_dims[i]] != nullptr) {
need_rewrite = true;
break;
}
}
if (!need_rewrite) {
return nullptr;
}
for (int64_t i = output_dims.size() - 1; i > 0; i--) {
const int64_t output_dim = output_dims[i];
HloInstruction* dynamic_size = output_dynamic_dims[output_dim];
HloInstruction* static_output_dim_size = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
output_shape.dimensions(output_dim))));
HloInstruction* broadcasted_static_output_dim_size =
reshape->AddInstruction(HloInstruction::CreateBroadcast(
mask_input_shape, static_output_dim_size, {}));
if (dynamic_size != nullptr) {
HloInstruction* dim_index =
reshape->AddInstruction(HloInstruction::CreateBinary(
mask_input_shape, HloOpcode::kRemainder, iota,
broadcasted_static_output_dim_size));
HloInstruction* broadcasted_effective_size = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, dynamic_size, {}));
HloInstruction* selected =
reshape->AddInstruction(HloInstruction::CreateCompare(
pred_input_shape, dim_index, broadcasted_effective_size,
ComparisonDirection::kLt));
input_shape_pred_mask = reshape->AddInstruction(
HloInstruction::CreateBinary(pred_input_shape, HloOpcode::kAnd,
input_shape_pred_mask, selected));
}
iota = reshape->AddInstruction(
HloInstruction::CreateBinary(mask_input_shape, HloOpcode::kDivide, iota,
broadcasted_static_output_dim_size));
}
HloInstruction* broadcasted_one = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
HloInstruction* broadcasted_zero = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, zero, {}));
return reshape->AddInstruction(HloInstruction::CreateTernary(
mask_input_shape, HloOpcode::kSelect, input_shape_pred_mask,
broadcasted_one, broadcasted_zero));
}
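// Rewrites a reshape that splits one input dimension into several output
// dimensions, some of which are dynamic. A cumulative sum over the in-bounds
// mask yields gather indices that spread the contiguously packed input
// elements out to the padded positions they occupy in the output.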
absl::StatusOr<bool> RewriteDynamicReshapeSplitInput(
HloInstruction* reshape, int64_t input_dim,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> output_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
VLOG(2) << "Reshaping input dim " << input_dim << " to "
<< VectorString(output_dims);
const Shape operand_shape = reshape->operand(0)->shape();
TF_RET_CHECK(output_dims.size() > 1);
const Shape mask_input_shape =
ShapeUtil::MakeShape(xla::S32, {operand_shape.dimensions(input_dim)});
const Shape pred_input_shape =
ShapeUtil::MakeShape(xla::PRED, {operand_shape.dimensions(input_dim)});
HloInstruction* zero = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
HloInstruction* one = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
HloInstruction* input_shape_binary_mask =
GenerateBinaryMask(reshape, input_dim, output_dims, output_dynamic_dims,
one, zero, true);
if (input_shape_binary_mask == nullptr) {
VLOG(2) << "No need to rewrite";
return false;
}
auto embedded_builder = HloComputation::Builder("add");
{
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
}
HloComputation* add =
reshape->GetModule()->AddEmbeddedComputation(embedded_builder.Build());
Window cumsum_window;
WindowDimension* dim = cumsum_window.add_dimensions();
dim->set_size(operand_shape.dimensions(input_dim));
dim->set_stride(1);
dim->set_padding_low(operand_shape.dimensions(input_dim) - 1);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
HloInstruction* cumsum =
reshape->AddInstruction(HloInstruction::CreateReduceWindow(
mask_input_shape, input_shape_binary_mask, zero, cumsum_window, add));
HloInstruction* broadcast_ones = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
cumsum = reshape->AddInstruction(HloInstruction::CreateBinary(
mask_input_shape, HloOpcode::kSubtract, cumsum, broadcast_ones));
GatherDimensionNumbers gather_dim_numbers;
for (int64_t i = 0; i < operand_shape.dimensions_size(); ++i) {
if (i != input_dim) {
gather_dim_numbers.add_offset_dims(i);
}
}
gather_dim_numbers.add_start_index_map(input_dim);
gather_dim_numbers.set_index_vector_dim(1);
gather_dim_numbers.add_collapsed_slice_dims(input_dim);
HloInstruction* operand_static_dim_size =
reshape->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(operand_shape.dimensions(input_dim))));
HloInstruction* operand_static =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
operand_shape, reshape->mutable_operand(0), operand_static_dim_size,
input_dim));
std::vector<int64_t> slice_sizes(operand_shape.dimensions().begin(),
operand_shape.dimensions().end());
slice_sizes[input_dim] = 1;
  HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(
      ShapeUtil::MakeShape(operand_shape.element_type(),
                           operand_shape.dimensions()),
      operand_static, cumsum, gather_dim_numbers, slice_sizes,
      /*indices_are_sorted=*/true));
TF_RETURN_IF_ERROR(reshape->ReplaceOperandWith(0, gather));
HloInstruction* reshape_dynamic = reshape;
auto users = reshape->users();
for (int64_t output_dim : output_dims) {
HloInstruction* output_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);
if (output_dynamic_size != nullptr) {
reshape_dynamic =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
reshape->shape(), reshape_dynamic, output_dynamic_size,
output_dim));
}
}
for (auto* user : users) {
TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, reshape_dynamic));
}
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, reshape_dynamic, {}));
return true;
}
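// Rewrites a reshape that combines several input dimensions, some of which
// are dynamic, into one output dimension. A stable sort of the in-bounds mask
// moves valid elements to the front of the combined dimension, and a gather
// reorders the data accordingly.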
absl::StatusOr<bool> RewriteDynamicReshapeCombineInput(
HloInstruction* reshape, absl::Span<const int64_t> input_dims,
int64_t output_dim, absl::Span<HloInstruction*> input_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* zero = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
HloInstruction* one = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
const Shape output_shape = reshape->shape();
const Shape input_shape = reshape->operand(0)->shape();
const Shape mask_output_shape =
ShapeUtil::MakeShape(xla::S32, {output_shape.dimensions(output_dim)});
HloInstruction* output_shape_binary_mask =
GenerateBinaryMask(reshape, output_dim, input_dims, input_dynamic_dims,
one, zero, false);
if (output_shape_binary_mask == nullptr) {
VLOG(2) << "No need to rewrite";
return false;
}
HloInstruction* iota =
reshape->AddInstruction(HloInstruction::CreateIota(mask_output_shape, 0));
HloComputation::Builder comp_builder("compare");
HloInstruction* lhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(S32), "lhs_key"));
HloInstruction* rhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeScalarShape(S32), "rhs_key"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeScalarShape(S32), "lhs_value"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeScalarShape(S32), "rhs_value"));
comp_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), lhs_key,
rhs_key, ComparisonDirection::kGt));
HloComputation* compare =
reshape->GetModule()->AddEmbeddedComputation(comp_builder.Build());
  HloInstruction* sort = reshape->AddInstruction(HloInstruction::CreateSort(
      ShapeUtil::MakeTupleShape({mask_output_shape, mask_output_shape}), 0,
      {output_shape_binary_mask, iota}, compare,
      /*is_stable=*/true));
HloInstruction* gather_indices = reshape->AddInstruction(
HloInstruction::CreateGetTupleElement(mask_output_shape, sort, 1));
GatherDimensionNumbers gather_dim_numbers;
for (int64_t i = 0; i < output_shape.dimensions_size(); ++i) {
if (i != output_dim) {
gather_dim_numbers.add_offset_dims(i);
}
}
gather_dim_numbers.add_start_index_map(output_dim);
gather_dim_numbers.set_index_vector_dim(1);
gather_dim_numbers.add_collapsed_slice_dims(output_dim);
HloInstruction* static_dim_size = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
reshape->shape().dimensions(output_dim))));
Shape reshape_static_shape = reshape->shape();
reshape_static_shape.set_dynamic_dimension(output_dim, false);
HloInstruction* reshape_static =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
reshape_static_shape, reshape, static_dim_size, output_dim));
std::vector<int64_t> gather_slice_sizes(output_shape.dimensions().begin(),
output_shape.dimensions().end());
gather_slice_sizes[output_dim] = 1;
  HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(
      output_shape, reshape_static, gather_indices, gather_dim_numbers,
      gather_slice_sizes, /*indices_are_sorted=*/true));
HloInstruction* output_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);
TF_RET_CHECK(output_dynamic_size != nullptr);
gather = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
gather->shape(), gather, output_dynamic_size, output_dim));
auto users = reshape->users();
for (auto* user : users) {
if (user != reshape_static && user != output_dynamic_size) {
TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, gather));
}
}
if (reshape == reshape->parent()->root_instruction()) {
reshape->parent()->set_root_instruction(gather);
}
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(reshape, gather, {}));
return true;
}
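// Dispatches a single common-factor group of a dynamic reshape to either the
// split-input or the combine-input rewrite above.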
absl::StatusOr<bool> RewriteDynamicReshapeSingleGroup(
HloInstruction* reshape, absl::Span<const int64_t> input_dims,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> input_dynamic_dims,
absl::Span<HloInstruction*> output_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
VLOG(2) << "Rewriting dynamic reshape " << reshape->ToString()
<< " input dims: " << VectorString(input_dims)
<< " output dims: " << VectorString(output_dims);
const Shape operand_shape = reshape->operand(0)->shape();
const Shape output_shape = reshape->shape();
if (input_dims.size() == 1) {
int64_t input_dim = input_dims[0];
if (operand_shape.dimensions()[input_dim] == 1) {
return false;
}
return RewriteDynamicReshapeSplitInput(reshape, input_dim, output_dims,
output_dynamic_dims,
dynamic_dimension_inference);
}
if (output_dims.size() == 1) {
int64_t output_dim = output_dims[0];
if (output_shape.dimensions()[output_dim] == 1) {
return false;
}
return RewriteDynamicReshapeCombineInput(reshape, input_dims, output_dim,
input_dynamic_dims,
dynamic_dimension_inference);
}
  // A group that both splits and combines dimensions is decomposed into a
  // flatten-unflatten pair before reaching this point, so this is unreachable.
  TF_RET_CHECK(false);
  return false;
}
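// Rewrites a reverse with dynamic dimensions. Reversing the padded buffer
// moves the padding to the front, so the result is padded high to twice the
// bound and then dynamic-sliced starting at bound - dynamic_size to realign
// the valid data.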
absl::StatusOr<bool> RewriteReverse(
HloInstruction* reverse,
DynamicDimensionInference* dynamic_dimension_inference) {
auto reverse_dims = reverse->dimensions();
const Shape& reverse_shape = reverse->shape();
std::set<int64_t> dynamic_reverse_dims;
for (int64_t reverse_dim : reverse_dims) {
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reverse, {}, reverse_dim);
if (dynamic_size == nullptr) {
continue;
}
dynamic_reverse_dims.insert(reverse_dim);
}
if (dynamic_reverse_dims.empty()) {
return false;
}
PaddingConfig padding;
Shape pad_shape = reverse_shape;
for (int i = 0; i < reverse_shape.rank(); ++i) {
auto dimension = padding.add_dimensions();
if (dynamic_reverse_dims.count(i) > 0) {
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(reverse_shape.dimensions(i));
dimension->set_interior_padding(0);
pad_shape.set_dimensions(i, 2 * pad_shape.dimensions(i));
}
}
HloInstruction* cloned_reverse = reverse->AddInstruction(reverse->Clone());
HloInstruction* zero = reverse->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(pad_shape.element_type())));
HloInstruction* pad = reverse->AddInstruction(
HloInstruction::CreatePad(pad_shape, cloned_reverse, zero, padding));
std::vector<HloInstruction*> start_indices;
start_indices.reserve(reverse_shape.rank());
for (int i = 0; i < reverse_shape.rank(); ++i) {
if (dynamic_reverse_dims.count(i) > 0) {
HloInstruction* bound_size =
reverse->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(reverse_shape.dimensions(i))));
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reverse, {}, i);
HloInstruction* start_offset =
reverse->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract, bound_size,
dynamic_size));
start_indices.push_back(start_offset);
} else {
HloInstruction* zero = reverse->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
start_indices.push_back(zero);
}
}
HloInstruction* dynamic_reverse =
reverse->AddInstruction(HloInstruction::CreateDynamicSlice(
reverse_shape, pad, start_indices, reverse_shape.dimensions()));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reverse, dynamic_reverse, {}));
TF_RETURN_IF_ERROR(reverse->ReplaceAllUsesWith(dynamic_reverse));
return true;
}
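// Implements dynamic SAME padding: pads `input` high by the worst-case
// amount, then dynamic-slices with a start offset derived from the actual
// `padding_before` values, folding the window's static padding and base
// dilation into the explicit pad.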
HloInstruction* RewriteInputWithDynamicPadding(
HloInstruction* conv, HloInstruction* input, HloInstruction* padding_value,
absl::Span<HloInstruction*> padding_before, Window* input_window,
absl::FunctionRef<int64_t(int64_t)> window_dim_to_shape_dim) {
HloInstruction* zero_s32 = conv->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
Shape padded_shape = input->shape();
PaddingConfig padding_configs;
for (int64_t i = 0; i < input->shape().rank(); ++i) {
PaddingConfig::PaddingConfigDimension padding_dim;
*padding_configs.add_dimensions() = padding_dim;
}
std::vector<HloInstruction*> start_indices(input->shape().rank(), zero_s32);
for (int64_t dim_index = 0; dim_index < input_window->dimensions_size();
++dim_index) {
if (padding_before[dim_index] == nullptr) {
continue;
}
int64_t shape_dim = window_dim_to_shape_dim(dim_index);
WindowDimension* window_dim = input_window->mutable_dimensions(dim_index);
auto* padding_dim = padding_configs.mutable_dimensions(shape_dim);
const int64_t dilated_window_size = window_util::DilatedBound(
window_dim->size(), window_dim->window_dilation());
padding_dim->set_edge_padding_low(dilated_window_size);
padding_dim->set_edge_padding_high(window_dim->padding_high() +
window_dim->padding_low());
padding_dim->set_interior_padding(window_dim->base_dilation() - 1);
HloInstruction* slicing_start =
conv->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract,
conv->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_dim->edge_padding_low()))),
padding_before[dim_index]));
start_indices[shape_dim] = slicing_start;
padded_shape.mutable_dimensions()[shape_dim] =
window_dim->padding_low() +
window_util::DilatedBound(padded_shape.dimensions(shape_dim),
window_dim->base_dilation()) +
window_dim->padding_high();
window_dim->clear_padding_high();
window_dim->clear_padding_low();
window_dim->set_base_dilation(1);
input->mutable_shape()->set_dynamic_dimension(shape_dim, false);
}
HloInstruction* pad =
MakePadHlo(input, padding_value, padding_configs).value();
input = conv->AddInstruction(HloInstruction::CreateDynamicSlice(
padded_shape, pad, start_indices, padded_shape.dimensions()));
return input;
}
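// Lowers the "DynamicConvolutionInputGrad" custom call to a static
// convolution: zero out the padded area of the gradient, compute the dynamic
// SAME padding from the requested input sizes, and emit a regular convolve.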
absl::StatusOr<bool> RewriteDynamicConvolutionInputGrad(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* grad = custom_call_conv->mutable_operand(1);
HloInstruction* kernel = custom_call_conv->mutable_operand(2);
TF_RET_CHECK(kernel->shape().is_static());
auto dnums = custom_call_conv->convolution_dimension_numbers();
Window window = custom_call_conv->window();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(1), {}, input_spatial_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
grad = PadWithScalar(grad, input_spatial_dim, operand_dynamic_size, zero);
HloInstruction* slice =
custom_call_conv->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(S32, {1}),
custom_call_conv->mutable_operand(0), {input_spatial_dim},
{input_spatial_dim + 1}, {1}));
HloInstruction* dynamic_input_size = custom_call_conv->AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedInputGradSize(
dynamic_input_size, window_dim.size(),
window_dim.window_dilation(),
window_dim.base_dilation(),
custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
grad = RewriteInputWithDynamicPadding(
custom_call_conv, grad, zero, absl::MakeSpan(padding_before), &window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
  // Operand 0 of the custom call is the input_sizes operand; drop its entry
  // when building the precision config for the two-operand convolution below,
  // and otherwise pass the original config through unchanged.
  PrecisionConfig precision_config;
  if (custom_call_conv->precision_config().operand_precision_size() == 3) {
    *precision_config.mutable_operand_precision() = {
        custom_call_conv->precision_config().operand_precision().begin() + 1,
        custom_call_conv->precision_config().operand_precision().end()};
  } else {
    precision_config = custom_call_conv->precision_config();
  }
  HloInstruction* static_conv =
      custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
          custom_call_conv->shape(), grad, kernel,
          custom_call_conv->feature_group_count(),
          custom_call_conv->batch_group_count(), window,
          custom_call_conv->convolution_dimension_numbers(),
          precision_config));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
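// Lowers the "DynamicConvolutionForward" custom call: zero out padded spatial
// and feature regions of the input, apply dynamic SAME padding if requested,
// and emit a regular convolve.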
absl::StatusOr<bool> RewriteDynamicConvolutionForward(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* input = custom_call_conv->mutable_operand(0);
HloInstruction* kernel = custom_call_conv->mutable_operand(1);
Window window = custom_call_conv->window();
auto dnums = custom_call_conv->convolution_dimension_numbers();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
input = PadWithScalar(input, input_spatial_dim, operand_dynamic_size, zero);
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
const int64_t input_feature_dim = dnums.input_feature_dimension();
if (HloInstruction* input_feature_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
input = PadWithScalar(input, input_feature_dim, input_feature_dynamic_size,
zero);
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
input = RewriteInputWithDynamicPadding(
custom_call_conv, input, zero, absl::MakeSpan(padding_before), &window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
HloInstruction* static_conv =
custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
custom_call_conv->shape(), input, kernel,
custom_call_conv->feature_group_count(),
custom_call_conv->batch_group_count(), window,
custom_call_conv->convolution_dimension_numbers(),
custom_call_conv->precision_config()));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
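// Lowers the "DynamicConvolutionKernelGrad" custom call: zero out the padded
// regions of both activations and gradients (they must agree on which spatial
// dimensions are dynamic) before emitting a regular convolve.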
absl::StatusOr<bool> RewriteDynamicConvolutionKernelGrad(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* activations = custom_call_conv->mutable_operand(0);
HloInstruction* gradients = custom_call_conv->mutable_operand(1);
TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(activations));
TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(gradients));
Window window = custom_call_conv->window();
auto dnums = custom_call_conv->convolution_dimension_numbers();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
int64_t kernel_spatial_dim =
dnums.kernel_spatial_dimensions(spatial_dim_index);
HloInstruction* activations_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
if (activations_dynamic_size != nullptr) {
activations = PadWithScalar(activations, input_spatial_dim,
activations_dynamic_size, zero);
}
HloInstruction* gradients_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(1), {}, kernel_spatial_dim);
if (gradients_dynamic_size != nullptr) {
gradients = PadWithScalar(gradients, kernel_spatial_dim,
gradients_dynamic_size, zero);
}
if (activations_dynamic_size == nullptr ||
gradients_dynamic_size == nullptr) {
TF_RET_CHECK(activations_dynamic_size == nullptr &&
gradients_dynamic_size == nullptr);
continue;
}
int64_t output_spatial_dim =
dnums.output_spatial_dimensions(spatial_dim_index);
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
activations_dynamic_size,
custom_call_conv->shape().dimensions(output_spatial_dim),
window_dim.stride(),
window_dim.window_dilation(),
custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
const int64_t input_feature_dim = dnums.input_feature_dimension();
if (HloInstruction* input_feature_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
activations = PadWithScalar(activations, input_feature_dim,
input_feature_dynamic_size, zero);
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
activations = RewriteInputWithDynamicPadding(
custom_call_conv, activations, zero, absl::MakeSpan(padding_before),
&window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
HloInstruction* static_conv =
custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
custom_call_conv->shape(), activations, gradients,
custom_call_conv->feature_group_count(),
custom_call_conv->batch_group_count(), window,
custom_call_conv->convolution_dimension_numbers(),
custom_call_conv->precision_config()));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
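// Lowers the "DynamicReduceWindowSamePadding" custom call by padding each
// dynamic input dimension with the init value and applying dynamic SAME
// padding before emitting a regular reduce-window.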
absl::StatusOr<bool> RewriteDynamicReduceWindowSamePadding(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
  if (hlo->shape().IsTuple()) {
    return Unimplemented(
        "Variadic DynamicReduceWindowSamePadding is not yet supported.");
  }
HloInstruction* input = hlo->mutable_operand(0);
HloInstruction* init = hlo->mutable_operand(1);
int64_t rank = hlo->shape().rank();
Window window = hlo->window();
std::vector<HloInstruction*> padding_before(hlo->shape().rank(), nullptr);
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
dim_index);
if (operand_dynamic_size == nullptr) {
continue;
}
const WindowDimension& window_dim = window.dimensions(dim_index);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
input = PadWithScalar(input, dim_index, operand_dynamic_size, init);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
padding_before[dim_index] = dynamic_window_dims.padding_before;
}
input = RewriteInputWithDynamicPadding(
hlo, input, init, absl::MakeSpan(padding_before), &window,
[](int64_t dim) { return dim; });
HloInstruction* rewritten =
hlo->AddInstruction(HloInstruction::CreateReduceWindow(
hlo->shape(), input, init, window, hlo->called_computations()[0]));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
return true;
}
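// Lowers the "DynamicSelectAndScatterSamePadding" custom call. The operand is
// padded with the select function's identity value and the source with the
// init value; the scattered result is then padded and dynamic-sliced back to
// the original bounds.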
absl::StatusOr<bool> RewriteDynamicSelectAndScatterSamePadding(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* input = hlo->mutable_operand(0);
HloInstruction* source = hlo->mutable_operand(1);
HloInstruction* init = hlo->mutable_operand(2);
TF_ASSIGN_OR_RETURN(HloInstruction * input_padding_value,
ChooseIdentityValue(hlo, 0));
int64_t rank = hlo->shape().rank();
Window window = hlo->window();
std::vector<HloInstruction*> padding_before(hlo->shape().rank(), nullptr);
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
const WindowDimension& window_dim = window.dimensions(dim_index);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
dim_index);
if (operand_dynamic_size == nullptr) {
continue;
}
input = PadWithScalar(input, dim_index, operand_dynamic_size,
input_padding_value);
HloInstruction* source_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(1), {},
dim_index);
if (source_dynamic_size == nullptr) {
continue;
}
source = PadWithScalar(source, dim_index, source_dynamic_size, init);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
padding_before[dim_index] = dynamic_window_dims.padding_before;
}
input = RewriteInputWithDynamicPadding(
hlo, input, input_padding_value, absl::MakeSpan(padding_before), &window,
[](int64_t dim) { return dim; });
HloInstruction* rewritten =
hlo->AddInstruction(HloInstruction::CreateSelectAndScatter(
input->shape(), input, hlo->called_computations()[0], window, source,
init, hlo->called_computations()[1]));
std::vector<HloInstruction*> start_indices(
input->shape().rank(), hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(S32))));
PaddingConfig padding_configs;
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
PaddingConfig::PaddingConfigDimension padding_dim;
if (padding_before[dim_index] != nullptr) {
const WindowDimension& window_dim = window.dimensions(dim_index);
const int64_t dilated_window_size = window_util::DilatedBound(
window_dim.size(), window_dim.window_dilation());
padding_dim.set_edge_padding_high(dilated_window_size);
start_indices[dim_index] = padding_before[dim_index];
}
*padding_configs.add_dimensions() = padding_dim;
}
HloInstruction* padded = MakePadHlo(rewritten, init, padding_configs).value();
rewritten = hlo->AddInstruction(HloInstruction::CreateDynamicSlice(
hlo->shape(), padded, start_indices, hlo->shape().dimensions()));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
return true;
}
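// Rewrites a concatenate whose concat dimension is dynamic into a chain of
// dynamic-update-slices, accumulating each operand's (static or dynamic) size
// as the offset for the next update.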
absl::StatusOr<bool> RewriteDynamicConcat(
HloInstruction* concat,
DynamicDimensionInference* dynamic_dimension_inference) {
const int64_t concat_dim = concat->concatenate_dimension();
if (dynamic_dimension_inference->GetDynamicSize(concat, {}, concat_dim) ==
nullptr) {
return false;
}
std::vector<HloInstruction*> offsets;
offsets.reserve(concat->shape().dimensions_size());
for (int64_t i = 0; i < concat->shape().dimensions_size(); ++i) {
offsets.push_back(concat->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0))));
}
HloInstruction* rewritten_concat = concat;
auto prev_users = concat->users();
for (int64_t i = 0; i < concat->operand_count(); ++i) {
HloInstruction* operand = concat->mutable_operand(i);
rewritten_concat =
concat->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
rewritten_concat->shape(), rewritten_concat, operand, offsets));
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, concat_dim);
if (dynamic_size == nullptr) {
HloInstruction* static_size = concat->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(concat_dim))));
offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
static_size));
} else {
offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
dynamic_size));
}
}
TF_RETURN_IF_ERROR(concat->ReplaceUsesWith(prev_users, rewritten_concat));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
concat, rewritten_concat, {}));
return true;
}
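// Rewrites a sort with a dynamic sort dimension by appending an "in bounds"
// predicate operand and augmenting the comparator so that out-of-bounds
// elements always compare greater, keeping them at the end of the sorted
// output.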
absl::StatusOr<bool> RewriteDynamicSort(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* dynamic_size = nullptr;
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
int64_t sort_dim = sort->sort_dimension();
  for (auto* operand : sort->operands()) {
    dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(operand, {}, sort_dim);
    if (dynamic_size != nullptr) {
      break;
    }
  }
if (dynamic_size == nullptr) {
return false;
}
Shape operand_shape =
ShapeUtil::ChangeElementType(sort->operand(0)->shape(), S32);
Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
HloInstruction* iota = hlo->AddInstruction(
HloInstruction::CreateIota(broadcast_shape, sort_dim));
HloInstruction* dynamic_size_broadcasted = hlo->AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, dynamic_size, {}));
HloInstruction* lt = hlo->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(broadcast_shape, PRED), iota,
dynamic_size_broadcasted, ComparisonDirection::kLt));
sort->AppendOperand(lt);
const int64_t param_number_before_rewritten =
sort->called_computations()[0]->num_parameters();
auto new_param_0 = HloInstruction::CreateParameter(
param_number_before_rewritten, ShapeUtil::MakeScalarShape(PRED),
"inbound_lhs");
auto new_param_1 = HloInstruction::CreateParameter(
param_number_before_rewritten + 1, ShapeUtil::MakeScalarShape(PRED),
"inbound_rhs");
std::vector<const HloInstruction*> extra_parameters{new_param_0.get(),
new_param_1.get()};
HloComputation* sort_comp = sort->GetModule()->AddEmbeddedComputation(
sort->called_computations()[0]->CloneWithReplacements(
nullptr, extra_parameters));
auto inbound_lhs =
sort_comp->parameter_instruction(param_number_before_rewritten);
auto inbound_rhs =
sort_comp->parameter_instruction(param_number_before_rewritten + 1);
sort->ReplaceCalledComputations(
[&](HloComputation* comp) { return sort_comp; });
auto out_of_bound_rhs = sort_comp->AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kNot, inbound_rhs));
auto sort_comp_or_out_of_bound_rhs =
sort_comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kOr,
sort_comp->root_instruction(), out_of_bound_rhs));
auto new_root = sort_comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kAnd, inbound_lhs,
sort_comp_or_out_of_bound_rhs));
sort_comp->set_root_instruction(new_root);
if (sort->shape().IsTuple()) {
*sort->mutable_shape()->add_tuple_shapes() =
ShapeUtil::ChangeElementType(operand_shape, PRED);
} else {
auto sort_users = sort->users();
auto sort_clone = hlo->AddInstruction(sort->Clone());
*sort_clone->mutable_shape() = ShapeUtil::MakeTupleShape(
{sort->shape(), ShapeUtil::ChangeElementType(operand_shape, PRED)});
auto rewritten_sort = hlo->AddInstruction(
HloInstruction::CreateGetTupleElement(sort->shape(), sort_clone, 0));
for (HloInstruction* user : sort_users) {
TF_RETURN_IF_ERROR(sort->ReplaceUseWith(user, rewritten_sort));
}
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
sort, rewritten_sort, {}));
if (hlo->parent()->root_instruction() == sort) {
hlo->parent()->set_root_instruction(rewritten_sort);
}
}
return true;
}
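// Rewrites an elementwise binary op whose operands disagree on a dynamic
// dimension size. When one side's dynamic size is 1, its single valid slice
// is explicitly broadcast across the dimension to emulate implicit
// broadcasting.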
absl::StatusOr<bool> RewriteDynamicBinaryOp(
HloInstruction* binary,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* operand_0 = binary->mutable_operand(0);
HloInstruction* operand_1 = binary->mutable_operand(1);
TF_RET_CHECK(operand_0->shape().rank() == operand_1->shape().rank());
auto dims_0 = dynamic_dimension_inference->GetDynamicSizes(operand_0, {});
auto dims_1 = dynamic_dimension_inference->GetDynamicSizes(operand_1, {});
bool changed = false;
  for (int64_t i = 0; i < dims_0.size(); ++i) {
    HloInstruction* dim_0 = dims_0[i];
    HloInstruction* dim_1 = dims_1[i];
    if (dim_0 != dim_1 && dim_0 != nullptr && dim_1 != nullptr) {
changed = true;
auto rewrite_operand = [&](HloInstruction* pred,
HloInstruction* operand) -> HloInstruction* {
Shape static_shape = ShapeUtil::MakeStaticShape(operand->shape());
pred = binary->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(static_shape, PRED), pred, {}));
Shape slice_shape = static_shape;
slice_shape.set_dimensions(i, 1);
std::vector<int64_t> start_indices(slice_shape.rank(), 0);
std::vector<int64_t> strides(slice_shape.rank(), 1);
HloInstruction* slice = binary->AddInstruction(
HloInstruction::CreateSlice(slice_shape, operand, start_indices,
slice_shape.dimensions(), strides));
Shape reshape_shape = ShapeUtil::DeleteDimension(i, slice_shape);
HloInstruction* reshape = binary->AddInstruction(
HloInstruction::CreateReshape(reshape_shape, slice));
std::vector<int64_t> broadcast_dims;
broadcast_dims.reserve(static_shape.rank() - 1);
for (int64_t j = 0; j < static_shape.rank(); ++j) {
if (j != i) {
broadcast_dims.push_back(j);
}
}
HloInstruction* broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateBroadcast(static_shape, reshape,
broadcast_dims),
"implicit_broadcast");
HloInstruction* select =
binary->AddInstruction(HloInstruction::CreateTernary(
static_shape, HloOpcode::kSelect, pred, broadcast, operand));
return select;
};
HloInstruction* one = binary->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
auto operand_0_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
dim_1, ComparisonDirection::kLt),
"lhs_less_than_rhs");
auto is_one = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
one, ComparisonDirection::kEq),
"lhs_is_one");
operand_0_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kAnd, is_one,
operand_0_needs_broadcast),
"lhs_needs_implicit_broadcast");
operand_0 = rewrite_operand(operand_0_needs_broadcast, operand_0);
auto operand_1_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
dim_0, ComparisonDirection::kLt),
"rhs_less_than_lhs");
is_one = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
one, ComparisonDirection::kEq),
"rhs_is_one");
      operand_1_needs_broadcast = binary->parent()->AddInstruction(
          HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
                                       HloOpcode::kAnd, is_one,
                                       operand_1_needs_broadcast),
          "rhs_needs_implicit_broadcast");
operand_1 = rewrite_operand(operand_1_needs_broadcast, operand_1);
}
}
if (changed) {
TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(0, operand_0));
TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(1, operand_1));
}
return changed;
}
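// Rewrites a dynamic-update-slice whose update has dynamic dimensions smaller
// than the base: the padded region of the update is replaced with the
// corresponding slice of the base, so the update is a no-op there.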
absl::StatusOr<bool> RewriteDynamicUpdateSlice(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloDynamicUpdateSliceInstruction* dus =
Cast<HloDynamicUpdateSliceInstruction>(hlo);
HloInstruction* update = dus->mutable_operand(1);
HloInstruction* base = dus->mutable_operand(0);
std::vector<HloInstruction*> dynamic_dims_in_partial_update(
update->shape().rank(), nullptr);
bool needs_rewrite = false;
for (int64_t i = 0; i < update->shape().rank(); ++i) {
if (update->shape().dimensions(i) < base->shape().dimensions(i)) {
HloInstruction* dynamic_dim =
dynamic_dimension_inference->GetDynamicSize(update, {}, i);
if (dynamic_dim != nullptr) {
dynamic_dims_in_partial_update[i] = dynamic_dim;
needs_rewrite = true;
}
}
}
if (!needs_rewrite) {
return false;
}
std::vector<HloInstruction*> indices;
indices.reserve(dus->operand_count() - 2);
for (int64_t i = 2; i < dus->operand_count(); ++i) {
indices.push_back(dus->mutable_operand(i));
}
HloInstruction* base_slice =
dus->AddInstruction(HloInstruction::CreateDynamicSlice(
update->shape(), base, indices, update->shape().dimensions()));
for (int64_t i = 0; i < dynamic_dims_in_partial_update.size(); ++i) {
HloInstruction* dynamic_dim = dynamic_dims_in_partial_update[i];
if (dynamic_dim != nullptr) {
Shape mask_shape_int = ShapeUtil::ChangeElementType(update->shape(), S32);
Shape mask_shape_pred =
ShapeUtil::ChangeElementType(update->shape(), PRED);
HloInstruction* iota =
dus->AddInstruction(HloInstruction::CreateIota(mask_shape_int, i));
HloInstruction* broadcast_dim = dus->AddInstruction(
HloInstruction::CreateBroadcast(mask_shape_int, dynamic_dim, {}));
HloInstruction* pred = dus->AddInstruction(HloInstruction::CreateCompare(
mask_shape_pred, iota, broadcast_dim, ComparisonDirection::kLt));
update = dus->AddInstruction(HloInstruction::CreateTernary(
update->shape(), HloOpcode::kSelect, pred, update, base_slice));
}
}
TF_RETURN_IF_ERROR(dus->ReplaceOperandWith(1, update));
return true;
}
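// Rewrites a reshape with dynamic dimensions group by common-factor group.
// Groups that both split and combine dimensions are first decomposed into a
// flatten-unflatten pair, each of which is then rewritten recursively.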
absl::StatusOr<bool> RewriteDynamicReshape(
HloInstruction* reshape,
DynamicDimensionInference* dynamic_dimension_inference) {
bool changed = false;
HloInstruction* operand = reshape->mutable_operand(0);
std::vector<HloInstruction*> input_dynamic_dims;
input_dynamic_dims.reserve(operand->shape().dimensions_size());
for (int64_t dim = 0; dim < operand->shape().dimensions_size(); ++dim) {
input_dynamic_dims.push_back(
dynamic_dimension_inference->GetDynamicSize(operand, {}, dim));
}
std::vector<HloInstruction*> output_dynamic_dims;
output_dynamic_dims.reserve(reshape->shape().dimensions_size());
for (int64_t dim = 0; dim < reshape->shape().dimensions_size(); ++dim) {
output_dynamic_dims.push_back(
dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim));
}
auto common_factors = CommonFactors(operand->shape().dimensions(),
reshape->shape().dimensions());
bool need_flatten_unflatten = false;
auto is_dynamic_dimension = [&](int64_t dim) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim);
return operand_dynamic_size != nullptr ||
reshape->shape().is_dynamic_dimension(dim);
};
  auto should_skip_common_factor_group =
      [&](const DimensionVector& input_dims,
          const DimensionVector& output_dims) {
if (input_dims.empty() || output_dims.empty()) {
return true;
}
if (absl::c_none_of(output_dims, is_dynamic_dimension)) {
VLOG(2) << "All dimensions are static in this common factor group";
return true;
}
if (input_dims.size() == 1 && output_dims.size() == 1) {
return true;
}
return false;
};
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
DimensionVector input_dims;
DimensionVector output_dims;
for (int64_t dim = start.first; dim < end.first; ++dim) {
input_dims.push_back(dim);
}
for (int64_t dim = start.second; dim < end.second; ++dim) {
output_dims.push_back(dim);
}
if (should_skip_common_factor_group(input_dims, output_dims)) {
continue;
}
if (input_dims.size() > 1 && output_dims.size() > 1) {
need_flatten_unflatten = true;
break;
}
}
if (need_flatten_unflatten) {
VLOG(2) << "Rewrite dynamic reshape to flatten-unflatten pair. "
<< reshape->ToString();
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flattened_shape =
ShapeUtil::MakeShape(operand->shape().element_type(), {num_elements});
HloInstruction* flatten = operand->parent()->AddInstruction(
HloInstruction::CreateReshape(flattened_shape, operand),
absl::StrCat(reshape->name(), ".flatten"));
HloInstruction* dynamic_size =
operand->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(num_elements)));
for (int64_t i = 0; i < operand->shape().rank(); i++) {
HloInstruction* dynamic_dim_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, i);
if (dynamic_dim_size != nullptr) {
HloInstruction* static_dim_size = operand->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(i))));
dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
static_dim_size));
dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
dynamic_dim_size));
}
}
dynamic_dimension_inference->SetDynamicSize(flatten, {}, 0, dynamic_size);
Shape unflattened_shape = ShapeUtil::MakeStaticShape(reshape->shape());
HloInstruction* unflatten = reshape->parent()->AddInstruction(
HloInstruction::CreateReshape(unflattened_shape, flatten),
absl::StrCat(reshape->name(), ".unflatten"));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, unflatten, {}));
TF_ASSIGN_OR_RETURN(
bool changed_unused,
RewriteDynamicReshape(flatten, dynamic_dimension_inference));
TF_ASSIGN_OR_RETURN(
changed_unused,
RewriteDynamicReshape(unflatten, dynamic_dimension_inference));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, unflatten, {}));
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(unflatten));
return true;
}
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
DimensionVector input_dims;
DimensionVector output_dims;
for (int64_t dim = start.first; dim < end.first; ++dim) {
input_dims.push_back(dim);
}
for (int64_t dim = start.second; dim < end.second; ++dim) {
output_dims.push_back(dim);
}
VLOG(2) << "input_dims: " << VectorString(input_dims);
VLOG(2) << "output_dims: " << VectorString(output_dims);
if (should_skip_common_factor_group(input_dims, output_dims)) {
continue;
}
if (input_dims.size() > 1 && output_dims.size() > 1) {
return Internal(
"Should be handled by decomposing reshape into "
"flatten-unflatten pair. %s",
reshape->ToString());
}
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReshapeSingleGroup(
reshape, input_dims, output_dims,
absl::MakeSpan(input_dynamic_dims),
absl::MakeSpan(output_dynamic_dims),
dynamic_dimension_inference));
changed |= c;
}
if (reshape->opcode() == HloOpcode::kDynamicReshape) {
auto* static_reshape =
reshape->AddInstruction(HloInstruction::CreateReshape(
reshape->shape(), reshape->mutable_operand(0)));
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(static_reshape));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, static_reshape, {}));
changed = true;
}
return changed;
}
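// Walks a computation after padding and removes dynamism from the IR: ops
// that require dynamic inputs get explicit "SliceToDynamic" conversions,
// while everything else runs on the padded static shapes.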
class DynamicShapeRemovingVisitor : public DfsHloRewriteVisitor {
public:
explicit DynamicShapeRemovingVisitor(
const OpSupportsDynamismHandler& op_supports_dynamism_handler,
DynamicDimensionInference* dynamic_dimension_inference,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: op_supports_dynamism_handler_(op_supports_dynamism_handler),
dynamic_dimension_inference_(dynamic_dimension_inference),
execution_threads_(execution_threads) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleAsyncStart(HloInstruction* hlo) override;
absl::Status HandleAsyncUpdate(HloInstruction* hlo) override;
absl::Status HandleAsyncDone(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;
static absl::StatusOr<bool> Run(
HloComputation* computation,
const OpSupportsDynamismHandler& op_supports_dynamism_handler,
DynamicDimensionInference* dynamic_shape_inference,
const absl::flat_hash_set<absl::string_view>& execution_threads,
bool require_dynamic_output) {
DynamicShapeRemovingVisitor visitor(op_supports_dynamism_handler,
dynamic_shape_inference,
execution_threads);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
if (require_dynamic_output) {
HloInstruction* root = computation->root_instruction();
if (dynamic_shape_inference->HasDynamicDimension(root)) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_root,
visitor.ConvertToDynamic(root));
computation->set_root_instruction(new_root);
}
}
return visitor.changed();
}
private:
absl::StatusOr<HloInstruction*> ConvertToDynamic(HloInstruction* inst);
absl::Status ConvertOperandsToDynamic(HloInstruction* inst);
const OpSupportsDynamismHandler& op_supports_dynamism_handler_;
DynamicDimensionInference* dynamic_dimension_inference_;
absl::flat_hash_set<absl::string_view> execution_threads_;
};
absl::StatusOr<HloInstruction*> DynamicShapeRemovingVisitor::ConvertToDynamic(
HloInstruction* inst) {
  if (!dynamic_dimension_inference_->HasDynamicDimension(inst)) {
    // Nothing to convert: returning an OK absl::Status into an
    // absl::StatusOr<HloInstruction*> would fail at runtime, so return the
    // instruction unchanged.
    return inst;
  }
MarkAsChanged();
Shape shape = dynamic_dimension_inference_->GetDynamicShape(inst);
auto gtes = TupleUtil::DisassembleTupleInstruction(inst);
gtes.ForEachMutableElement([&](const ShapeIndex& index,
HloInstruction** element) {
const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
if (!subshape.IsArray()) {
return;
}
if (!dynamic_dimension_inference_->HasDynamicDimension(inst, index)) {
return;
}
std::vector<HloInstruction*> slice_operand;
slice_operand.push_back(*element);
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
auto dimension_size =
dynamic_dimension_inference_->GetDynamicSize(inst, index, i);
if (dimension_size == nullptr) {
dimension_size = inst->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(subshape.dimensions(i))));
}
slice_operand.push_back(dimension_size);
}
*element = inst->AddInstruction(HloInstruction::CreateCustomCall(
subshape, slice_operand, "SliceToDynamic"));
});
return TupleUtil::AssembleTupleInstruction(inst->parent(), std::move(gtes));
}
absl::Status DynamicShapeRemovingVisitor::ConvertOperandsToDynamic(
HloInstruction* inst) {
for (int64_t i = 0; i < inst->operand_count(); ++i) {
auto operand = inst->mutable_operand(i);
if (dynamic_dimension_inference_->HasDynamicDimension(operand)) {
TF_ASSIGN_OR_RETURN(auto dynamic_operand,
ConvertToDynamic(inst->mutable_operand(i)));
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(i, dynamic_operand));
MarkAsChanged();
}
}
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::DefaultAction(HloInstruction* hlo) {
OpDynamismSupport op_support = OpDynamismSupport::kNoSupport;
if (op_supports_dynamism_handler_) {
op_support = op_supports_dynamism_handler_(hlo);
}
  if (op_support == OpDynamismSupport::kRequired) {
    // This op requires dynamic inputs; reconstruct them with SliceToDynamic.
    VLOG(1) << "op requires dynamic tensor: " << hlo->ToString();
    return ConvertOperandsToDynamic(hlo);
  }
const bool input_is_dynamic = absl::c_any_of(
hlo->operands(),
[](const HloInstruction* hlo) { return hlo->shape().is_dynamic(); });
if (!input_is_dynamic) {
return absl::OkStatus();
}
TF_RET_CHECK(op_support != OpDynamismSupport::kNoSupport)
<< "Dynamic input unexpectedly found for unsupported instruction: "
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleTuple(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleInfeed(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleParameter(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleCustomCall(
HloInstruction* hlo) {
if (hlo->custom_call_target() == "SliceToDynamic" ||
hlo->custom_call_target() == "PadToStatic") {
return absl::OkStatus();
}
return DefaultAction(hlo);
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncStart(
HloInstruction* hlo) {
if (HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
execution_threads_)) {
return absl::OkStatus();
}
return ConvertOperandsToDynamic(hlo);
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncUpdate(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncDone(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleWhile(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleConditional(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleGetDimensionSize(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleSetDimensionSize(
HloInstruction* hlo) {
*hlo->mutable_shape() = hlo->operand(0)->shape();
hlo->mutable_shape()->set_dynamic_dimension(hlo->dimension(), false);
return absl::OkStatus();
}
}  // namespace
absl::StatusOr<bool> DynamicPadder::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Pre DynamicPadder HLO:";
XLA_VLOG_LINES(2, module->ToString());
HloDCE dce;
TF_ASSIGN_OR_RETURN(bool changed, dce.Run(module, execution_threads));
TF_ASSIGN_OR_RETURN(
DynamicDimensionInference dynamic_dimension_inference,
DynamicDimensionInference::Run(
module, options_.op_supports_dynamism_handler,
options_.custom_call_handler, options_.shape_check_mode,
options_.assertion_generator, execution_threads));
changed |= dynamic_dimension_inference.changed();
std::vector<HloComputation*> computations =
module->MakeComputationPostOrder(execution_threads);
for (HloComputation* computation : computations) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
OpDynamismSupport has_dynamism_support = OpDynamismSupport::kNoSupport;
if (options_.op_supports_dynamism_handler != nullptr) {
has_dynamism_support = options_.op_supports_dynamism_handler(inst);
}
if (has_dynamism_support != OpDynamismSupport::kNoSupport) {
continue;
}
if (inst->opcode() == HloOpcode::kConcatenate) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicConcat(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kReverse) {
TF_ASSIGN_OR_RETURN(bool c,
RewriteReverse(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kSort) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicSort(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kReshape ||
inst->opcode() == HloOpcode::kDynamicReshape) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicReshape(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsElementwiseBinary()) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicBinaryOp(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kDynamicUpdateSlice) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicUpdateSlice(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionInputGrad")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionInputGrad(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionForward")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionForward(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionKernelGrad")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionKernelGrad(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicReduceWindowSamePadding")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReduceWindowSamePadding(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicSelectAndScatterSamePadding(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
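      // Generic path: for each array operand with a dynamic dimension, mask
      // the padded region with an identity value chosen for this op, so the
      // padding cannot affect the result.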
for (int64_t operand_num = 0; operand_num < inst->operand_count();
++operand_num) {
HloInstruction* original_operand = inst->mutable_operand(operand_num);
HloInstruction* operand = original_operand;
if (!operand->shape().IsArray()) {
continue;
}
for (int64_t input_dim = 0; input_dim < operand->shape().rank();
++input_dim) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference.GetDynamicSize(original_operand, {},
input_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
          VLOG(2) << "Has dynamic dimension of operand " << operand_num
                  << " @" << input_dim;
if (ShouldSkipPadOnOperand(inst, operand_num, input_dim,
execution_threads)) {
continue;
}
TF_ASSIGN_OR_RETURN(HloInstruction * identity_value,
ChooseIdentityValue(inst, operand_num));
if (identity_value == nullptr) {
continue;
}
HloInstruction* padded = PadWithScalar(
operand, input_dim, operand_dynamic_size, identity_value);
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(operand_num, padded));
operand = inst->mutable_operand(operand_num);
changed = true;
}
}
}
}
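  // Second phase: strip dynamic shapes from every computation reachable from
  // the entry, visiting computations in reverse post order so callers are
  // processed before callees. The entry result stays dynamic only when
  // slice_dynamic_output is requested.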
auto call_graph = CallGraph::Build(module, execution_threads);
computations = module->MakeComputationPostOrder(execution_threads);
for (auto it = computations.rbegin(); it != computations.rend(); ++it) {
HloComputation* computation = *it;
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
bool require_dynamic_output = options_.slice_dynamic_output &&
computation == module->entry_computation();
changed |= require_dynamic_output;
TF_ASSIGN_OR_RETURN(bool c,
DynamicShapeRemovingVisitor::Run(
computation, options_.op_supports_dynamism_handler,
&dynamic_dimension_inference, execution_threads,
require_dynamic_output));
changed |= c;
}
if (changed) {
dynamic_padding_gauge->GetCell()->Set(changed);
module->set_is_dynamic(true);
}
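  // Cleanup: lower the remaining get-dimension-size, set-dimension-size, and
  // set-bound instructions in reachable computations, then DCE whatever
  // became dead.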
for (auto* computation : module->computations(execution_threads)) {
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
for (auto instruction : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(
bool c, ReplaceGetSize(instruction, &dynamic_dimension_inference));
changed |= c;
}
}
for (auto* computation : module->computations(execution_threads)) {
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
for (auto instruction : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool c, ReplaceSetSize(instruction));
changed |= c;
TF_ASSIGN_OR_RETURN(c, ReplaceSetBound(instruction));
changed |= c;
}
}
if (changed) {
HloDCE dce;
TF_ASSIGN_OR_RETURN(bool c, dce.Run(module, execution_threads));
changed |= c;
}
VLOG(2) << "Post DynamicPadder HLO:";
XLA_VLOG_LINES(2, module->ToString());
return changed;
}
} | #include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/llvm_irgen_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = xla::testing::opcode_matchers;
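// Test-only dynamism handler: only the "OpWithDynamicLowering" custom call
// advertises that it requires dynamic inputs; everything else is padded.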
OpDynamismSupport OpHasDynamismSupport(HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kCustomCall) {
return OpDynamismSupport::kNoSupport;
}
if (hlo->custom_call_target() == "OpWithDynamicLowering") {
return OpDynamismSupport::kRequired;
}
return OpDynamismSupport::kNoSupport;
}
absl::Status CustomCallDynamicDimensionInference(
HloInstruction* hlo, DynamicDimensionInference* inferencer) {
if (hlo->custom_call_target() == "OpWithDynamicLowering") {
if (hlo->shape().IsTuple()) {
HloInstruction* dynamic_size =
inferencer->GetDynamicSize(hlo->mutable_operand(0), {1}, 0);
inferencer->SetDynamicSize(hlo, {1}, 0, dynamic_size);
} else {
HloInstruction* dynamic_size =
inferencer->GetDynamicSize(hlo->mutable_operand(0), {}, 0);
inferencer->SetDynamicSize(hlo, {}, 0, dynamic_size);
}
}
return absl::OkStatus();
}
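// Fixture that runs the DynamicPadder followed by tuple and algebraic
// simplification, mirroring (approximately) how the pass is used in a real
// pipeline.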
class DynamicPadderTest : public HloTestBase {
protected:
DynamicPadderTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); }
std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_text).value();
return module;
}
absl::StatusOr<bool> RunPadder(
bool slice_dynamic_output = false,
OpSupportsDynamismHandler op_supports_dynamism_handler =
OpHasDynamismSupport,
DynamicDimensionInference::CustomCallInferenceHandler
custom_call_handler = CustomCallDynamicDimensionInference) {
DynamicPadderOptions options;
options.slice_dynamic_output = slice_dynamic_output;
options.op_supports_dynamism_handler =
std::move(op_supports_dynamism_handler);
options.custom_call_handler = std::move(custom_call_handler);
DynamicPadder padder(std::move(options));
TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&padder, module_.get()));
if (!changed) return false;
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(RunHloPass(&tuple_simplifier, module_.get()).status());
AlgebraicSimplifier alg_simplifier(AlgebraicSimplifierOptions{});
TF_RETURN_IF_ERROR(RunHloPass(&alg_simplifier, module_.get()).status());
return true;
}
void ExpectPadded(const HloInstruction* inst) {
EXPECT_THAT(inst,
op::Select(op::Lt(op::Iota(), op::Broadcast(op::Parameter())),
::testing::_, op::Broadcast()));
}
HloComputation* GetScalarAddComputation() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
std::unique_ptr<HloModule> module_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
class MemoryAlignmentTest : public HloTestBase {};
TEST_F(MemoryAlignmentTest, DISABLED_ON_CPU(TestDataTypeFP16)) {
const std::string hlo_text = R"(
HloModule TestDataTypeFP16
update_add (p0: f16[], p1: f16[]) -> f16[] {
p0 = f16[] parameter(0)
p1 = f16[] parameter(1)
ROOT out = f16[] add(p0, p1)
}
ENTRY main () -> f16[<=1,1] {
c1 = s32[1]{0} constant({1})
c2 = f16[1,1]{1,0} constant({ {0.099976} })
shape = s32[] reshape(s32[1]{0} c1)
dim_size = f16[<=1,1]{1,0} set-dimension-size(f16[1,1]{1,0} c2, s32[] shape),
dimensions={0}
ROOT out = f16[<=1,1]{1,0} scatter(f16[<=1,1]{1,0} dim_size, s32[1]{0} c1, f16[1,1]{1,0} c2),
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
to_apply=update_add
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
TEST_F(DynamicPadderTest, ReduceTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 2));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, negate, init, {0, 2}, GetScalarAddComputation()));
EXPECT_FALSE(module_->is_dynamic());
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
ExpectPadded(reduce->operand(0));
EXPECT_TRUE(module_->is_dynamic());
}
TEST_F(DynamicPadderTest, DynamicLoweringTest) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
custom-call.1 = s32[<=5] custom-call(param_padded),
custom_call_target="OpWithDynamicLowering"
custom-call.2 = s32[<=5] custom-call(custom-call.1),
custom_call_target="OpWithDynamicLowering"
ROOT negate = s32[<=5] negate(custom-call.2)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
auto custom_call_1 =
module_->entry_computation()->GetInstructionWithName("custom-call.1");
auto custom_call_2 =
module_->entry_computation()->GetInstructionWithName("custom-call.2");
HloInstruction* slice_to_dynamic = custom_call_1->mutable_operand(0);
ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
ASSERT_EQ(custom_call_2->user_count(), 1);
HloInstruction* pad_to_static = custom_call_2->users()[0];
ASSERT_THAT(pad_to_static->opcode(), HloOpcode::kCustomCall);
ASSERT_THAT(pad_to_static->custom_call_target(), "PadToStatic");
slice_to_dynamic = module_->entry_computation()->root_instruction();
ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
}
TEST_F(DynamicPadderTest, DynamicLoweringTestTupleInput) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
tuple_arg = (s32[], s32[<=5]) tuple(const, param_padded)
custom-call.1 = (s32[], s32[<=5]) custom-call(tuple_arg),
custom_call_target="OpWithDynamicLowering"
custom-call.2 = (s32[], s32[<=5]) custom-call(custom-call.1),
custom_call_target="OpWithDynamicLowering"
data = s32[<=5]{0} get-tuple-element(custom-call.2), index=1
ROOT negate = s32[<=5] negate(data)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_THAT(root, op::CustomCall(
{"SliceToDynamic"}, op::Negate(),
op::GetTupleElement(op::CustomCall({"PadToStatic"}))));
HloInstruction* negate = root->mutable_operand(0);
EXPECT_THAT(
negate,
op::Negate(op::GetTupleElement(op::CustomCall(
{"PadToStatic"}, op::GetTupleElement(op::CustomCall(
{"OpWithDynamicLowering"}, ::testing::_))))));
auto custom_call_1 =
module_->entry_computation()->GetInstructionWithName("custom-call.1");
EXPECT_THAT(custom_call_1,
op::CustomCall({"OpWithDynamicLowering"},
op::Tuple(op::Constant(),
op::CustomCall({"SliceToDynamic"}))));
}
TEST_F(DynamicPadderTest, DynamicOutputNestedTuple) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
const2 = s32[] constant(4)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
tuple0 = (s32[], s32[<=5]) tuple(const, param_padded)
ROOT tuple1 = (s32[], (s32[], s32[<=5])) tuple(const2, tuple0)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
TF_ASSERT_OK(TupleSimplifier().Run(module_.get()).status());
XLA_LOG_LINES(INFO, module_->ToString());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::Constant(), op::Tuple()));
HloInstruction* nested_tuple = root->mutable_operand(1);
EXPECT_THAT(nested_tuple,
op::Tuple(op::Constant(), op::CustomCall({"SliceToDynamic"})));
}
TEST_F(DynamicPadderTest, ConvolutionTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim});
auto xy_shape_dynamic =
ShapeUtil::MakeShape(F32, {xdim, ydim}, {false, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_shape_dynamic, a_param, size_param, 1));
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
zx_shape, a_param, b_param, 1,
1, window, dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
ExpectPadded(conv->operand(0));
}
TEST_F(DynamicPadderTest, ConvolutionNoPad) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
zx_shape, a_param, b_param, 1,
1, window, dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
EXPECT_THAT(conv->operand(0), op::Parameter());
}
TEST_F(DynamicPadderTest, ReduceWindowNoPadForTrivialWindow) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
auto reduce_shape = ShapeUtil::MakeShape(F32, {3, 5}, {false, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {false, true});
auto input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "input"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, input, size_param, 1));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
TF_ASSERT_OK_AND_ASSIGN(Window window, ParseWindow("size=2x1 pad=0_0x0_0"));
auto output = builder.AddInstruction(HloInstruction::CreateReduceWindow(
reduce_shape, input, init, window, GetScalarAddComputation()));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
EXPECT_THAT(output->operand(0), op::Parameter());
}
TEST_F(DynamicPadderTest, VariadicReduceWindowNoPadForTrivialWindow) {
const std::string hlo_text = R"(
HloModule VariadicReduceWindowNoPadForTrivialWindow
add_f32 (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
a = f32[] parameter(0)
b = s32[] parameter(1)
c = f32[] parameter(2)
d = s32[] parameter(3)
add.0 = f32[] add(a, c)
add.1 = s32[] add(b, d)
ROOT out = tuple(add.0, add.1)
}
ENTRY main {
input.0 = f32[4, 5] parameter(0)
input.1 = s32[4, 5] parameter(1)
size_param.0 = s32[] parameter(2)
size_param.1 = s32[] parameter(3)
input_dynamic.0 = f32[4,<=5] set-dimension-size(input.0, size_param.0), dimensions={1}
input_dynamic.1 = s32[4,<=5] set-dimension-size(input.1, size_param.0), dimensions={1}
init.0 = f32[] constant(0.0)
init.1 = s32[] constant(0)
ROOT output = (f32[3, <=5], s32[3, <=5]) reduce-window(input_dynamic.0, input_dynamic.1, init.0, init.1), window={size=2x1 pad=0_0x0_0}, to_apply=add_f32
}
)";
const int kNumParams = 2;
module_ = ParseAndReturnVerifiedModule(hlo_text).value();
TF_ASSERT_OK(RunPadder().status());
for (int i = 0; i < kNumParams; ++i) {
EXPECT_THAT(module_->entry_computation()->root_instruction()->operand(i),
op::Parameter());
}
}
TEST_F(DynamicPadderTest, PadS8ToS32Dot) {
const std::string hlo_text = R"(
HloModule test
ENTRY test {
a = s8[<=16,32] parameter(0)
b = s8[32,64] parameter(1)
ROOT root = s32[<=16,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
EXPECT_THAT(module_->entry_computation()->root_instruction(),
GmockMatch(m::CustomCall({"SliceToDynamic"},
m::Dot(m::Op().WithShape(S8, {16, 32}),
m::Op().WithShape(S8, {32, 64}))
.WithShape(S32, {16, 64}),
m::Op(), m::Op())));
}
TEST_F(DynamicPadderTest, PadToStaticForCustomCall) {
const std::string hlo_text = R"(
HloModule test
ENTRY test {
a = f32[64] parameter(0)
ROOT c = f32[<=128] custom-call(a),
custom_call_target="UnknownOp"
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
EXPECT_THAT(module_->entry_computation()->root_instruction(),
GmockMatch(m::CustomCall({"UnknownOp"})));
}
TEST_F(DynamicPadderTest, WhileLoopDynamicShapeChangeToStatic) {
const std::string hlo_text = R"(
HloModule WhileLoopDynamicShapeChangeToStatic
%cond_wrapper.19447 {
param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
%get-tuple-element.184 = s32[] get-tuple-element(param), index=0
%get-tuple-element.185 = s32[] get-tuple-element(param), index=1
ROOT %compare.28 = pred[] compare(s32[] %get-tuple-element.184, s32[] %get-tuple-element.185), direction=LT
}
%while_body_78894_grad_83711__.18882 {
param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
%get-tuple-element.184 = s32[] get-tuple-element(param), index=0
%get-tuple-element.185 = s32[] get-tuple-element(param), index=1
%add.1 = s32[] add(get-tuple-element.184, get-tuple-element.184)
%gte.2 = f32[] get-tuple-element(param), index=2
%broadcast.19389 = f32[32,216]{1,0} broadcast(f32[] %gte.2), dimensions={}
%constant.32 = s32[] constant(32)
%set-dimension-size = f32[<=32,216]{1,0} set-dimension-size(f32[32,216]{1,0} %broadcast.19389, s32[] %constant.32), dimensions={0}
ROOT tuple = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(add.1, %get-tuple-element.185, %gte.2, %set-dimension-size)
}
ENTRY main {
param = f32[] parameter(0)
param.1 = f32[<=32,216]{1,0} parameter(1)
const = s32[] constant(3)
const2 = s32[] constant(4)
%tuple.18877 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(const, const2, param, param.1)
%while.19451 = (s32[], s32[], f32[], f32[<=32,216]{1,0})
while((s32[], s32[], f32[], f32[<=32,216]{1,0})
%tuple.18877), condition=%cond_wrapper.19447, body=%while_body_78894_grad_83711__.18882
ROOT result = f32[<=32,216]{1,0} get-tuple-element(while.19451), index=3
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
XLA_LOG_LINES(INFO, module_->ToString());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_EQ(root->shape(), ShapeUtil::MakeShape(F32, {32, 216}, {true, false}));
HloInstruction* while_inst = nullptr;
for (HloInstruction* inst :
module_->entry_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_inst, nullptr)
<< "while_inst: " << while_inst->name() << ", inst: " << inst->name();
while_inst = inst;
}
}
EXPECT_EQ(while_inst->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeScalarShape(S32),
ShapeUtil::MakeScalarShape(S32),
ShapeUtil::MakeScalarShape(F32),
ShapeUtil::MakeShape(F32, {32, 216}),
ShapeUtil::MakeScalarShape(S32)}));
}
TEST_F(DynamicPadderTest, WhileLoopCarriesRequiredDynamicShape) {
const std::string hlo_text = R"(
HloModule WhileLoopCarriesRequiredDynamicShape
%cond {
param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
current = s32[] get-tuple-element(param), index=5
last = s32[] get-tuple-element(param), index=6
ROOT result = pred[] compare(current, last), direction=LT
}
%body {
param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
var = f32[1024] get-tuple-element(param), index=0
input0 = f32[<=64] get-tuple-element(param), index=1
grad0 = f32[32] get-tuple-element(param), index=2
input1 = f32[<=64] get-tuple-element(param), index=3
act1 = f32[32] get-tuple-element(param), index=4
grad1 = f32[32] custom-call(act1), custom_call_target="ComputeGradients"
var1 = f32[1024] custom-call(var, input0, grad0), custom_call_target="ApplyGradients", output_to_operand_aliasing={{}: (0, {})}
token2 = token[] get-tuple-element(param), index=7
infeed2 = (f32[<=64], token[]) infeed(token2)
input2 = f32[<=64] get-tuple-element(infeed2), index=0
act2 = f32[32] custom-call(var1, input2), custom_call_target="ComputeActivations"
current = s32[] get-tuple-element(param), index=5
constant1 = s32[] constant(1)
add = s32[] add(current, constant1)
last = s32[] get-tuple-element(param), index=6
token3 = token[] get-tuple-element(infeed2), index=1
ROOT result = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) tuple(var1, input1, grad1, input2, act2, add, last, token3)
}
ENTRY main {
last = s32[] parameter(0)
var = f32[1024] parameter(1)
token0 = token[] after-all()
infeed0 = (f32[<=64], token[]) infeed(token0)
input0 = f32[<=64] get-tuple-element(infeed0), index=0
act0 = f32[32] custom-call(var, input0), custom_call_target="ComputeActivations"
grad0 = f32[32] custom-call(act0), custom_call_target="ComputeGradients"
token1 = token[] get-tuple-element(infeed0), index=1
infeed1 = (f32[<=64], token[]) infeed(token1)
input1 = f32[<=64] get-tuple-element(infeed1), index=0
act1 = f32[32] custom-call(var, input1), custom_call_target="ComputeActivations"
token2 = token[] get-tuple-element(infeed1), index=1
zero = s32[] constant(0)
tuple = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) tuple(var, input0, grad0, input1, act1, zero, last, token2)
while = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) while(tuple), condition=%cond, body=%body
ROOT result = f32[1024] get-tuple-element(while), index=0
}
)";
module_ = GetHloModule(hlo_text);
auto op_supports_dynamism = [](HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kCustomCall) {
return OpDynamismSupport::kNoSupport;
}
if (hlo->custom_call_target() == "ComputeActivations" ||
hlo->custom_call_target() == "ApplyGradients") {
return OpDynamismSupport::kRequired;
}
return OpDynamismSupport::kNoSupport;
};
auto custom_call_handler = [](HloInstruction* hlo,
DynamicDimensionInference* inference) {
return absl::OkStatus();
};
TF_ASSERT_OK(
RunPadder(
true,
std::move(op_supports_dynamism),
std::move(custom_call_handler))
.status());
XLA_VLOG_LINES(1, module_->ToString());
for (HloComputation* computation : module_->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCustomCall) {
EXPECT_NE(instruction->custom_call_target(), "PadToStatic");
EXPECT_NE(instruction->custom_call_target(), "SliceToDynamic");
if (instruction->custom_call_target() == "ComputeActivations") {
EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
} else if (instruction->custom_call_target() == "ApplyGradients") {
EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
}
} else if (instruction->opcode() == HloOpcode::kWhile) {
const Shape& shape = instruction->shape();
EXPECT_TRUE(shape.tuple_shapes(1).is_dynamic());
EXPECT_TRUE(shape.tuple_shapes(3).is_dynamic());
}
}
}
}
TEST_F(DynamicPadderTest, HandleReshapeCheckPastReshape) {
auto hlo_text = R"(
HloModule ReshapeDynamicDimension
ENTRY main {
p0 = f32[4,511,432]{2,1,0} parameter(0)
p1 = s32[] parameter(1)
p2 = f32[432,337]{1,0:T(8,128)} parameter(2)
p0_dynamic = f32[<=4,511,432] set-dimension-size(p0, p1), dimensions={0}
reshape.4179 = f32[<=2044,432]{1,0} reshape(p0_dynamic)
dot.4180 = f32[<=2044,337]{1,0} dot(reshape.4179, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
transpose.4181 = f32[<=2044,337]{1,0} transpose(dot.4180), dimensions={0,1}
ROOT reshape.4183 = f32[<=4,511,337]{2,1,0} reshape(transpose.4181)
})";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
VLOG(3) << module_->ToString();
CHECK(module_->is_dynamic());
CHECK(module_->entry_computation()
->root_instruction()
->shape()
.is_dynamic_dimension(0));
}
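// Execution tests run the padder (plus DCE) on real inputs and compare the
// padded execution against expected literals. When slice_dynamic_output is
// false, the entry layout's dynamic annotation is cleared and callers fix up
// dynamic sizes on the result literal by hand.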
class ExecutionTest : public HloTestBase {
protected:
std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_text).value();
return module;
}
absl::StatusOr<Literal> PadAndExecute(std::unique_ptr<HloModule> module,
absl::Span<Literal* const> arguments,
bool slice_dynamic_output = true) {
if (!slice_dynamic_output) {
auto new_config = module->config();
new_config.mutable_entry_computation_layout()
->mutable_result_layout()
->ClearDynamicShape();
module->set_config(new_config);
}
DynamicPadderOptions options;
options.slice_dynamic_output = slice_dynamic_output;
DynamicPadder padder(options);
TF_CHECK_OK(padder.Run(module.get()).status());
HloDCE dce;
TF_CHECK_OK(dce.Run(module.get()).status());
return Execute(std::move(module), {arguments});
}
};
XLA_TEST_F(ExecutionTest, ScatterUpdate) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[INDICES_BOUND] parameter(1)
updates = s32[INDICES_BOUND,3] parameter(2)
dynamic_size = s32[] parameter(3)
indices_dynamic = s32[<=INDICES_BOUND] set-dimension-size(indices, dynamic_size), dimensions={0}
updates_dynamic = s32[<=INDICES_BOUND,3] set-dimension-size(updates, dynamic_size), dimensions={0}
ROOT scatter = s32[3,3] scatter(operand, indices_dynamic, updates_dynamic),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
const std::string hlo_text_not_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}});
auto module_not_padded = GetHloModule(hlo_text_not_padded);
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2);
Literal not_padded =
ExecuteAndTransfer(std::move(module_not_padded),
{&operand, &scatter_indices, &updates, &dynamic_size});
const std::string hlo_text_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}});
auto module_padded = GetHloModule(hlo_text_padded);
Literal scatter_indices_padded = LiteralUtil::CreateR1<int32_t>({0, 2, 0, 4});
Literal updates_padded = LiteralUtil::CreateR2<int32_t>(
{{10, 20, 30}, {70, 80, 90}, {30, 22, 11}, {-1, 20, -1}});
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal padded,
PadAndExecute(std::move(module_padded),
{&operand, &scatter_indices_padded,
&updates_padded, &dynamic_size}));
EXPECT_EQ(padded, not_padded);
}
XLA_TEST_F(ExecutionTest, ScatterUpdateWindowDim) {
const std::string hlo_text = R"(
HloModule ScatterUpdateWindowDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[1,2,3] parameter(0)
indices = s32[1] parameter(1)
updates = s32[2,3,1] parameter(2)
dynamic_size = s32[] constant(1)
operand_dynamic = s32[1, <=2, 3] set-dimension-size(operand, dynamic_size),
dimensions={1}
updates_dynamic = s32[<=2, 3, 1] set-dimension-size(updates, dynamic_size),
dimensions={0}
ROOT scatter = s32[1, <=2, 3] scatter(operand_dynamic, indices, updates_dynamic),
to_apply=update_s32,
update_window_dims={0, 1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
auto hlo_module = GetHloModule(hlo_text);
Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, 0, 0}, {0, 0, 0}}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0});
Literal updates =
LiteralUtil::CreateR3<int32_t>({{{10}, {20}, {30}}, {{70}, {80}, {90}}});
TF_ASSERT_OK_AND_ASSIGN(
Literal padded,
PadAndExecute(std::move(hlo_module),
{&operand, &scatter_indices, &updates}, false));
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{10, 20, 30}, {70, 80, 90}}});
EXPECT_EQ(padded, expected);
}
XLA_TEST_F(ExecutionTest, ScatterUpdateF32) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
ROOT rhs = f32[] parameter(1)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
dynamic_size = s32[] parameter(3)
indices_dynamic = s32[<=2] set-dimension-size(indices, dynamic_size), dimensions={0}
updates_dynamic = f32[<=2,3] set-dimension-size(updates, dynamic_size), dimensions={0}
ROOT scatter = f32[3,3] scatter(operand, indices_dynamic, updates_dynamic),
to_apply=update_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
auto module_not_padded = GetHloModule(hlo_text);
Literal operand = LiteralUtil::CreateR2<float>(
{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<float>({{10.0, 20.0, 30.0}, {70.0, 80.0, 90.0}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(1);
auto module_padded = GetHloModule(hlo_text);
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(
Literal not_padded,
PadAndExecute(std::move(module_padded),
{&operand, &scatter_indices, &updates, &dynamic_size}));
EXPECT_EQ(LiteralUtil::CreateR2<float>(
{{10.0, 20.0, 30.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}),
not_padded);
}
XLA_TEST_F(ExecutionTest, WholeDimensionGather) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[3, 2, 1] parameter(0)
size = s32[] constant(1)
param_padded = s32[3, <=2, 1] set-dimension-size(param, size), dimensions={1}
index = s32[] constant(1)
gather = s32[<=2,1]{1,0} gather(param_padded, index),
offset_dims={0,1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=0,
slice_sizes={1,2,1}
init = s32[] constant(0)
ROOT reduce = s32[] reduce(gather, init),
dimensions={0, 1},
to_apply=update_s32
}
)";
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
auto module = GetHloModule(hlo_text);
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(3);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, TwoDimensionReduce) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[INDICES_BOUND, INDICES_BOUND] parameter(0)
dynamic_size = s32[] parameter(1)
param_0 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param, dynamic_size), dimensions={0}
param_1 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param_0, dynamic_size), dimensions={1}
const = s32[] constant(0)
ROOT reduce = s32[] reduce(param_1, const),
dimensions={0, 1},
to_apply=update_s32
}
)";
const std::string hlo_text_not_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}});
auto module_not_padded = GetHloModule(hlo_text_not_padded);
Literal operand = LiteralUtil::CreateR2<int32_t>({{1, 2}, {4, 5}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2);
Literal not_padded = ExecuteAndTransfer(std::move(module_not_padded),
{&operand, &dynamic_size});
const std::string hlo_text_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}});
auto module_padded = GetHloModule(hlo_text_padded);
Literal operand_padded = LiteralUtil::CreateR2<int32_t>(
{{1, 2, 3, 4}, {4, 5, 6, 7}, {1, 2, 3, 4}, {4, 5, 6, 7}});
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal padded,
PadAndExecute(std::move(module_padded),
{&operand_padded, &dynamic_size}));
EXPECT_EQ(padded, not_padded);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionClamp) {
const std::string hlo_text = R"(
HloModule TensorFlowTenaryV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
clamp = s32[<=5] clamp(param_padded, param_padded, param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(clamp, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicConcat) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3] parameter(0)
param_1 = s32[3] parameter(1)
param_2 = s32[3] parameter(2)
size = s32[] constant(2)
param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}
param_padded_2 = s32[<=3] set-dimension-size(param_2, size), dimensions={0}
ROOT %concatenate = s32[<=9]
concatenate(s32[<=3] param_padded_0, s32[<=3] param_1, s32[<=3] param_padded_2),
dimensions={0}
}
)";
Literal operand_0 =
LiteralUtil::CreateR1<int32_t>({1, 2, -1});
Literal operand_1 =
LiteralUtil::CreateR1<int32_t>({3, 4, 5});
Literal operand_2 =
LiteralUtil::CreateR1<int32_t>({6, 7, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
PadAndExecute(std::move(module), {&operand_0, &operand_1, &operand_2},
false));
result.SetDynamicSize(0, 7);
Literal expected = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6, 7});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReverseSingleDim) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3] parameter(0)
size = s32[] constant(2)
param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}
ROOT %reverse = s32[<=3]
reverse(s32[<=3] param_padded_0),
dimensions={0}
}
)";
Literal operand_0 =
LiteralUtil::CreateR1<int32_t>({1, 2, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, PadAndExecute(std::move(module), {&operand_0}, false));
result.SetDynamicSize(0, 2);
Literal expected = LiteralUtil::CreateR1<int32_t>({2, 1});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReverseMultiDims) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3, 3] parameter(0)
size = s32[] constant(2)
param_padded_0 = s32[<=3, 3] set-dimension-size(param_0, size), dimensions={0}
param_padded_1 = s32[<=3, <=3] set-dimension-size(param_padded_0, size),
dimensions={1}
ROOT %reverse = s32[<=3, <=3]
reverse(s32[<=3, <=3] param_padded_1),
dimensions={0, 1}
}
)";
Literal operand_0 = LiteralUtil::CreateR2<int32_t>(
{{1, 2, -1}, {3, 4, -1}, {-1, -1, -1}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, PadAndExecute(std::move(module), {&operand_0}, false));
result.SetDynamicSize(0, 2);
result.SetDynamicSize(1, 2);
Literal expected = LiteralUtil::CreateR2<int32_t>({{4, 3}, {2, 1}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionReduce) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
init = s32[] constant(0)
ROOT reduce = s32[] reduce(param_padded, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, InputMinorDimensionReshape) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
const = s32[] constant(3)
param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}
reshaped = s32[<=10] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(18);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, SliceSingleElement) {
const std::string hlo_text = R"(
HloModule Slicing
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
ROOT slice = s32[1]{0} slice(param_padded), slice={[0:1]}
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR1<int32_t>({0});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshape) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[12] parameter(0)
const = s32[] constant(8)
param_padded = s32[<=12] set-dimension-size(param, const), dimensions={0}
reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand =
LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMajor) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 6] parameter(0)
const = s32[] constant(4)
param_padded = s32[2, <=6] set-dimension-size(param, const), dimensions={1}
reshaped = s32[2, 2, <=3] reshape(param_padded), inferred_dimension=2
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={2},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>(
{{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 10, 11}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{1, 5}, {13, 17}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMinor) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[6, 2] parameter(0)
const = s32[] constant(4)
param_padded = s32[<=6, 2] set-dimension-size(param, const), dimensions={0}
reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>(
{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicInputFeature) {
const std::string hlo_text = R"(
HloModule DynamicInputFeature
ENTRY main {
param = f32[1, 1, 5] parameter(0)
const = s32[] constant(5)
one = f32[] constant(1)
kernel = f32[1,5,1]{2,1,0} broadcast(f32[] one), dimensions={}
param_dynamic = f32[1,1,<=5] set-dimension-size(param, const), dimensions={2}
ROOT conv = f32[1, 1, 1]{2,1,0} custom-call(f32[1, 1, <=5] param_dynamic, f32[1,<=5,1]{2,1,0} kernel),
window={size=1 pad=0_0},
dim_labels=b0f_0io->b0f,
padding_type=PADDING_VALID,
custom_call_target="DynamicConvolutionForward"
}
)";
Literal operand = LiteralUtil::CreateR3<float>({{{1, 2, 3, 4, 5}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR3<float>({{{15}}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(LlvmIrGenTestBase, LargeDynamicInput) {
#ifndef XLA_TEST_BACKEND_GPU
GTEST_SKIP();
#endif
const std::string hlo_text = R"(
HloModule LargeDynamicInput
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
param = f32[<=20,<=20,<=20,<=20,<=20,<=20,<=20,<=20] parameter(0)
zero = f32[] constant(0)
ROOT out = reduce(param, zero), to_apply=add, dimensions={0,1,2,3,4,5,6,7}
}
)";
CompileAndVerifyIr(hlo_text, R"(
CHECK: ret void
)",
true);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionReshapeUnchanged) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
const = s32[] constant(3)
param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}
reshaped = s32[2, <=5] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR1<int32_t>({6, 12});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DegeneratedDimension) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
size = s32[] constant(0)
param_padded = s32[<=1, 2, 5, 1] set-dimension-size(param, size),
dimensions={0}
reshaped = s32[<=10] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(0);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, ReshapeSplitCombineSameTime) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4, 2, 2] parameter(0)
two = s32[] constant(2)
one = s32[] constant(1)
param_padded_partial = s32[<=4, 2, 2] set-dimension-size(param, two),
dimensions={0}
param_padded_dynamic = s32[<=4, 2, <=2] set-dimension-size(param_padded_partial,
one),
dimensions={2}
reshaped = s32[2, <=2, <=4] reshape(param_padded_dynamic),
inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0, 1, 2},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, -1}, {1, -1}},
{{2, -1}, {3, -1}},
{{-1, -1}, {-1, -1}},
{{-1, -1}, {-1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, ReshapeComplicated) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 4, 4] parameter(0)
two = s32[] constant(2)
param_padded_dynamic = s32[2, <=4, 4] set-dimension-size(param, two),
dimensions={1}
reshaped = s32[<=16, 2] reshape(param_padded_dynamic), inferred_dimension=0
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0, 1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{1, 2, 3, 4}, {5, 6, 7, 8}, {-1, -1, -1, -1}, {-1, -1, -1, -1}},
{{9, 10, 11, 12},
{13, 14, 15, 16},
{-1, -1, -1, -1},
{-1, -1, -1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(136);
EXPECT_EQ(result, expected);
}
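// Simulates a stack push inside a while loop: the dynamic size of the stack
// buffer doubles as the stack pointer, grown via set-dimension-size and
// written with dynamic-update-slice.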
XLA_TEST_F(ExecutionTest, WhileLoopStack) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
body {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
zero = s32[] constant(0)
one = s32[] constant(1)
new_data = s32[1, 2] broadcast(s32[] stack_size), dimensions={}
new_stack_size = s32[] add(stack_size, one)
new_stack_buffer = s32[<=4, 2] set-dimension-size(stack_buffer, new_stack_size), dimensions={0}
new_stack = s32[<=4, 2] dynamic-update-slice(new_stack_buffer, new_data, stack_size, zero)
ROOT new_stack_tuple = (s32[<=4,2]) tuple(new_stack)
}
condition {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
three = s32[] constant(3)
ROOT less-than = pred[] compare(s32[] stack_size, s32[] three), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
pad = s32[] constant(-1)
stack_buffer_input = s32[4, 2] broadcast(s32[] pad), dimensions={}
stack_buffer_input_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, zero), dimensions={0}
input_tuple = (s32[<=4 ,2]) tuple(stack_buffer_input_dynamic)
while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition
stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0
ROOT reduce = s32[2] reduce(stack_buffer, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({{3, 3}});
EXPECT_EQ(result, expected);
}
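// With operand dynamic sizes 1 and 3 on the same dimension, the result
// appears to follow implicit-broadcast semantics: the size-1 operand is
// broadcast to size 3, so each of the three rows contributes 1 + 2 = 3 and
// the reduce yields 9 per column.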
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitBroadcast) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry {
zero = s32[] constant(0)
one = s32[] constant(1)
two = s32[] constant(2)
three = s32[] constant(3)
input1 = s32[4, 2] iota(), iota_dimension=0
ones = s32[4, 2] broadcast(one), dimensions={}
input1_added = s32[4, 2] add(input1, ones)
input1_dynamic = s32[<=4, 2] set-dimension-size(input1_added, one), dimensions={0}
input2 = s32[4, 2] broadcast(two), dimensions={}
input2_dynamic = s32[<=4, 2] set-dimension-size(input2, three), dimensions={0}
add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)
ROOT reduce = s32[2] reduce(add, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({{9, 9}});
EXPECT_EQ(result, expected);
}
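// Converse case: with dynamic sizes 3 and 2, the result appears to be
// implicitly sliced down to the smaller size 2, giving (1 + 2) * 2 = 6 per
// column.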
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitSlice) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry {
zero = s32[] constant(0)
one = s32[] constant(1)
two = s32[] constant(2)
three = s32[] constant(3)
input1 = s32[4, 2] broadcast(one), dimensions={}
input1_dynamic = s32[<=4, 2] set-dimension-size(input1, three), dimensions={0}
input2 = s32[4, 2] broadcast(two), dimensions={}
input2_dynamic = s32[<=4, 2] set-dimension-size(input2, two), dimensions={0}
add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)
ROOT reduce = s32[2] reduce(add, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({{6, 6}});
EXPECT_EQ(result, expected);
}
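// Pop is modeled by shrinking the dynamic dimension by one per iteration;
// after two pops of a size-4 stack of ones, only two rows remain, so the
// reduce yields 2 per column.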
XLA_TEST_F(ExecutionTest, DynamicStackPop) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
body {
param_tuple = (s32[<=4,2]) parameter(0)
param = s32[<=4, 2] get-tuple-element(param_tuple), index=0
one = s32[] constant(1)
size = s32[] get-dimension-size(param), dimensions={0}
new_size = s32[] subtract(size, one)
output = s32[<=4, 2] set-dimension-size(param, new_size), dimensions={0}
ROOT root = (s32[<=4, 2]) tuple(output)
}
condition {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4,2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
two = s32[] constant(2)
ROOT greater-than = pred[] compare(s32[] stack_size, s32[] two), direction=GT
}
ENTRY entry {
one = s32[] constant(1)
zero = s32[] constant(0)
four = s32[] constant(4)
stack_buffer_input = s32[4, 2] broadcast(s32[] one), dimensions={}
stack_buffer_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, four), dimensions={0}
input_tuple = (s32[<=4, 2]) tuple(stack_buffer_dynamic)
while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition
stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0
ROOT reduce = s32[2] reduce(stack_buffer, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({{2, 2}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DoubleDynamicDimension) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 3, 3] parameter(0)
size = s32[] constant(2)
param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),
dimensions={1}
param_padded = s32[2, 3, <=3] set-dimension-size(param_padded_partial, size),
dimensions={2}
reshaped = s32[<=18] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(16);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReshapeDoubleDynamicDimensions) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[2, 3, 3] parameter(0)
size = s32[] constant(2)
param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),
dimensions={1}
param_padded = s32[2, <=3, <=3] set-dimension-size(param_padded_partial, size),
dimensions={2}
result_size = s32[] constant(8)
ROOT reshaped = s32[<=18] dynamic-reshape(param_padded, result_size)
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}, false));
result.SetDynamicSize(0, 8);
Literal expected = LiteralUtil::CreateR1<int32_t>({0, 1, 3, 4, 0, 1, 3, 4});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReshapeOutputDoubleDynamicDimensions) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[18] parameter(0)
eight = s32[] constant(8)
param_dynamic = s32[<=18] set-dimension-size(param, eight), dimensions={0}
two = s32[] constant(2)
ROOT reshaped = s32[2, <=3, <=3] dynamic-reshape(param_dynamic, two, two, two)
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>(
{0, 1, 3, 4, 0, 1, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}, false));
VLOG(1) << " result: " << result.ToString();
result.SetDynamicSize(1, 2);
result.SetDynamicSize(2, 2);
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{0, 1}, {3, 4}}, {{0, 1}, {3, 4}}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReshapeComplicated) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[3, 4, 4] parameter(0)
two = s32[] constant(2)
param_dynamic = s32[<=3, 4, 4] set-dimension-size(param, two), dimensions={0}
three = s32[] constant(3)
param_dynamic1 = s32[<=3, <=4, 4] set-dimension-size(param_dynamic, three), dimensions={1}
param_dynamic2 = s32[<=3, <=4, <=4] set-dimension-size(param_dynamic1, three), dimensions={2}
six = s32[] constant(6)
ROOT reshaped = s32[<=6, <=8] dynamic-reshape(param_dynamic2, three, six)
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2, -1}, {3, 4, 5, -1}, {6, 7, 8, -1}, {-1, -1, -1, -1}},
{{9, 8, 7, -1}, {6, 5, 4, -1}, {3, 2, 1, -1}, {-1, -1, -1, -1}},
{{-1, -1, -1, -1},
{-1, -1, -1, -1},
{-1, -1, -1, -1},
{-1, -1, -1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
                          PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
result.SetDynamicSize(0, 3);
result.SetDynamicSize(1, 6);
Literal expected = LiteralUtil::CreateR2<int32_t>(
{{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 8, 7}, {6, 5, 4, 3, 2, 1}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, SetGetDimensionSize) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[3] parameter(0)
size = s32[] constant(2)
param_dynamic_size = s32[3] set-dimension-size(param, size),
dimensions={0}
ROOT gds = s32[] get-dimension-size(param_dynamic_size),
dimensions={0}
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(2);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicSort) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
%compare-greater-than (lhs: s32[], rhs: s32[]) -> pred[] {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
param_dynamic_size = s32[<=4] set-dimension-size(param, size),
dimensions={0}
ROOT sort = s32[<=4]{0} sort(s32[4]{0} %param_dynamic_size),
dimensions={0}, is_stable=false, to_apply=%compare-greater-than
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 2});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 3, 1, 2});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicPad) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
padding = s32[] constant(2)
param_dynamic = s32[<=4] set-dimension-size(param, size),
dimensions={0}
pad = s32[<=6] pad(param_dynamic, padding), padding=1_1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(pad, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5});
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
Literal expected = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicPadInteriorPadding) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
padding = s32[] constant(2)
param_dynamic = s32[<=4] set-dimension-size(param, size),
dimensions={0}
pad = s32[<=7] pad(param_dynamic, padding), padding=0_0_1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(pad, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5});
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
Literal expected = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicConditionalDimension) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
true_branch {
true_param = (s32[<=3,2]) parameter(0)
param = s32[<=3, 2] get-tuple-element(true_param), index=0
add = s32[<=3,2] add(param, param)
ROOT true_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)
}
false_branch {
false_param = (s32[<=3,2]) parameter(0)
param = s32[<=3, 2] get-tuple-element(false_param), index=0
add = s32[<=3,2] add(param, param)
ROOT false_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)
}
ENTRY entry {
param0 = s32[3,2] parameter(0)
size = s32[] constant(2)
branch = pred[] constant(false)
param_dynamic = s32[<=3, 2] set-dimension-size(param0, size), dimensions={0}
param_tuple = (s32[<=3 ,2]) tuple(param_dynamic)
conditional = (s32[<=3, 2], s32[<=3, 2]) conditional(branch, param_tuple, param_tuple),
true_computation=true_branch, false_computation=false_branch
gte0 = s32[<=3,2] get-tuple-element(conditional), index=1
init = s32[] constant(0)
ROOT reduce = s32[2] reduce(gte0, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>({{0, 1}, {2, 3}, {4, 5}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 8});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicTupleSort) {
const std::string hlo_text = R"(
HloModule TEST
%compare-greater-than (lhs: s32[], rhs: s32[], lhs_2: s32[], rhs_2: s32[]) -> pred[] {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
%lhs_2 = s32[] parameter(2)
%rhs_2 = s32[] parameter(3)
ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT
}
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[3] parameter(0)
size = s32[] constant(2)
param_dynamic_size = s32[<=3] set-dimension-size(param, size),
dimensions={0}
sort = (s32[<=3]{0}, s32[<=3]{0}) sort(s32[<=3]{0} %param_dynamic_size,
s32[<=3]{0} %param_dynamic_size),
dimensions={0}, is_stable=true, to_apply=%compare-greater-than
ROOT get-tuple-element = s32[<=3]{0} get-tuple-element((s32[<=3]{0}, s32[<=3]{0}) %sort),
index=0
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 4, 2});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
                                        /*slice_dynamic_output=*/false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 0, 2});
EXPECT_EQ(result, expected);
}
namespace op = xla::testing::opcode_matchers;
class HloDimensionSizeLegalizerTest : public HloTestBase {
protected:
HloDimensionSizeLegalizerTest() {}
};
TEST_F(HloDimensionSizeLegalizerTest, Ok) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3,4] parameter(0)
size0 = s32[] get-dimension-size(p), dimensions={0}
size1 = s32[] get-dimension-size(p), dimensions={1}
ROOT mul = s32[] multiply(size0, size1)
})")
.value();
DynamicPadder pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Constant(), op::Constant()));
}
TEST_F(HloDimensionSizeLegalizerTest, GetSetSetDimensionSizeRewriter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3,4] parameter(0)
size0 = s32[] get-dimension-size(p), dimensions={0}
p_copy = s32[3,4] copy(p)
p_copy_dynamic = s32[<=3, 4] set-dimension-size(p_copy, size0), dimensions={0}
size1 = s32[] get-dimension-size(p_copy_dynamic), dimensions={0}
ROOT mul = s32[] multiply(size0, size1)
})")
.value();
DynamicPadder pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Constant(), op::Constant()));
}
TEST_F(HloDimensionSizeLegalizerTest, IllegalType) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3]{0} parameter(0)
ROOT gds = s64[] get-dimension-size(p), dimensions={0}
})")
.value();
DynamicPadder pass;
EXPECT_FALSE(pass.Run(module.get()).ok());
}
TEST_F(HloDimensionSizeLegalizerTest, IllegalDimension) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
p = f32[2,5] parameter(0)
ROOT gds = s32[] get-dimension-size(p), dimensions={2}
})")
.value();
DynamicPadder pass;
EXPECT_FALSE(pass.Run(module.get()).ok());
}
class SizeCheckTest : public HloTestBase {
protected:
SizeCheckTest() {}
};
TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpFail) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
size_0 = s32[] parameter(0)
size_1 = s32[] parameter(1)
arg = s32[4]{0} parameter(2)
dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}
dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}
ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)
})")
.value();
auto options = DynamicPadderOptions();
options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
DynamicPadder pass(options);
auto status = pass.Run(module.get()).status();
EXPECT_THAT(status.code(), tsl::error::INVALID_ARGUMENT);
}
TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpPass) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
size_0 = s32[] parameter(0)
size_0_reshape = s32[1] reshape(size_0)
size_1 = s32[] reshape(size_0_reshape)
arg = s32[4]{0} parameter(1)
dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}
dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}
ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)
})")
.value();
auto options = DynamicPadderOptions();
options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
DynamicDimensionSimplifier simplifier;
EXPECT_TRUE(simplifier.Run(module.get()).ok());
DynamicPadder pass(options);
auto status = pass.Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb09fbe9-6296-4eab-b4c4-62ab42ce86f5 | cpp | tensorflow/tensorflow | reduce_decomposer | third_party/xla/xla/service/reduce_decomposer.cc | third_party/xla/xla/service/reduce_decomposer_test.cc | #include "xla/service/reduce_decomposer.h"
#include <functional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
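// Makes all inputs of a variadic reduction share the layout of the first
// input by inserting copies in front of any operand whose layout differs.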
class VariadicReductionLayoutEqualizer : public DfsHloRewriteVisitor {
public:
absl::Status HandleReduce(HloInstruction* hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
std::vector<HloInstruction*> new_inputs;
bool changed = false;
for (HloInstruction* input : reduce->inputs()) {
auto first_input = reduce->inputs()[0];
auto first_input_s = first_input->shape();
auto input_s = input->shape();
if (first_input_s.layout() != input_s.layout()) {
Shape new_input_s = ShapeUtil::MakeShapeWithDenseLayout(
input_s.element_type(), input_s.dimensions(),
first_input_s.layout().minor_to_major());
auto copy = MakeCopyHlo(input, new_input_s);
changed = true;
new_inputs.push_back(copy);
} else {
new_inputs.push_back(input);
}
}
if (changed) {
TF_ASSIGN_OR_RETURN(
auto new_reduce,
MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),
reduce->called_computations()[0]));
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce));
}
return absl::OkStatus();
}
};
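// Rewrites reductions whose output layout differs from the layout implied by
// deleting the reduced dimensions from the operand shape: the reduction is
// re-emitted with the implied layout, and a copy (one per output, for
// variadic reductions) restores the requested layout. Reductions accepted by
// the custom_layout_allowed_ predicate are left untouched.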
class ReduceDecomposerVisitor : public DfsHloRewriteVisitor {
public:
explicit ReduceDecomposerVisitor(HloPredicate custom_layout_allowed)
: custom_layout_allowed_(std::move(custom_layout_allowed)) {}
absl::Status HandleReduce(HloInstruction* hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
auto shape = reduce->shape();
if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {
return absl::OkStatus();
}
std::vector<Shape> expected_shapes(reduce->input_count());
for (int i = 0; i < reduce->input_count(); i++) {
expected_shapes[i] = ExpectedOutputShape(reduce, i);
TF_RET_CHECK(reduce->inputs()[i]->shape().layout() ==
reduce->inputs()[0]->shape().layout());
}
std::vector<Shape> output_shapes;
if (shape.IsTuple()) {
for (int i = 0; i < shape.tuple_shapes_size(); i++) {
output_shapes.push_back(ShapeUtil::GetTupleElementShape(shape, i));
TF_RET_CHECK(output_shapes[i].layout() == output_shapes[0].layout());
}
} else {
output_shapes.push_back(shape);
}
TF_RET_CHECK(!output_shapes.empty());
if (ShapeUtil::MakeMaybeTupleShape(expected_shapes) !=
ShapeUtil::MakeMaybeTupleShape(output_shapes)) {
TF_ASSIGN_OR_RETURN(auto r_prime,
MakeReduceHlo(reduce->inputs(), reduce->init_values(),
reduce->dimensions(),
reduce->called_computations()[0]));
TF_RET_CHECK(r_prime->shape() ==
ShapeUtil::MakeMaybeTupleShape(expected_shapes));
if (!shape.IsTuple()) {
auto copy = MakeCopyHlo(r_prime, shape);
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, copy));
return absl::OkStatus();
}
std::vector<HloInstruction*> copies;
for (int i = 0; i < reduce->input_count(); i++) {
TF_ASSIGN_OR_RETURN(auto from, GetOutput(r_prime, i));
auto copy = MakeCopyHlo(from, output_shapes[i]);
copies.push_back(copy);
}
auto out = MaybeMakeTuple(copies);
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, out));
}
return absl::OkStatus();
}
private:
absl::StatusOr<HloInstruction*> GetOutput(HloInstruction* instr, int idx) {
if (instr->shape().IsTuple()) {
return MakeGetTupleElementHlo(instr, idx);
} else {
TF_RET_CHECK(idx == 0);
return instr;
}
}
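  // Shape the reduction output would have if the reduced dimensions were
  // simply deleted from the corresponding input shape, layout included.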
Shape ExpectedOutputShape(HloReduceInstruction* reduce, int input_idx) {
Shape reduce_shape = reduce->shape();
auto output_shape = reduce_shape.IsTuple()
? reduce_shape.tuple_shapes(input_idx)
: reduce_shape;
auto* operand = reduce->inputs()[input_idx];
auto operand_shape = operand->shape();
return ShapeUtil::DeleteDimensions(reduce->dimensions(), operand_shape);
}
HloPredicate custom_layout_allowed_;
};
}
absl::StatusOr<bool> ReduceDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed1,
VariadicReductionLayoutEqualizer{}.RunOnModule(
module, execution_threads));
TF_ASSIGN_OR_RETURN(
bool changed2,
ReduceDecomposerVisitor{custom_layout_allowed_}.RunOnModule(
module, execution_threads));
return changed1 || changed2;
}
} | #include "xla/service/reduce_decomposer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ReduceDecomposerTest : public HloTestBase {};
TEST_F(ReduceDecomposerTest, ReducePerformsTransposition) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = f32[5,4]{0,1} reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(
hlo,
ReduceDecomposer{[&](const HloInstruction*) {
return true;
}},
std::nullopt);
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
TEST_F(ReduceDecomposerTest, ReduceNaturalLayout) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
TEST_F(ReduceDecomposerTest, VariadicReductionWithTranspose) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{0,1,2},
u32[2,3,4]{0,1,2}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
TEST_F(ReduceDecomposerTest, VariadicReductionDescendingLayout) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
TEST_F(ReduceDecomposerTest, VariadicReductionInputsDifferentLayout) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{2,1,3,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f470b0a5-df2e-44f5-a4bb-da606c6ec291 | cpp | tensorflow/tensorflow | dynamic_index_splitter | third_party/xla/xla/service/dynamic_index_splitter.cc | third_party/xla/xla/service/dynamic_index_splitter_test.cc | #include "xla/service/dynamic_index_splitter.h"
#include <map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> DynamicIndexSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> computations =
module->MakeNonfusionComputations(execution_threads);
for (HloComputation* computation : computations) {
for (HloInstruction* dynamic_op : computation->MakeInstructionPostOrder()) {
switch (dynamic_op->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
break;
default:
continue;
}
auto parent = dynamic_op->parent();
bool is_update = dynamic_op->opcode() == HloOpcode::kDynamicUpdateSlice;
int64_t num_indices = dynamic_op->operand(0)->shape().rank();
if (num_indices == 0) {
if (is_update) {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(1)));
} else {
TF_CHECK_OK(parent->ReplaceInstruction(
dynamic_op, dynamic_op->mutable_operand(0)));
}
changed = true;
continue;
}
int64_t index_operand_number =
Cast<HloDynamicIndexInstruction>(dynamic_op)
->first_index_operand_number();
auto index_operand = dynamic_op->mutable_operand(index_operand_number);
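      // Indices are already scalar; nothing to split.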
if (ShapeUtil::IsScalar(index_operand->shape())) {
continue;
}
TF_RET_CHECK(index_operand->shape().rank() == 1);
auto index_element_type = index_operand->shape().element_type();
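      // Slice each element out of the rank-1 index operand and reshape it to
      // a scalar, yielding one scalar start index per dimension.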
std::vector<HloInstruction*> index_array;
index_array.reserve(num_indices);
for (int64_t dim = 0; dim < num_indices; ++dim) {
auto slice = parent->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(index_element_type, {1}), index_operand, {dim},
{dim + 1}, {1}));
auto bitcast = parent->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(index_element_type, {}), slice));
index_array.push_back(bitcast);
}
auto new_dynamic_op =
is_update
? HloInstruction::CreateDynamicUpdateSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
dynamic_op->mutable_operand(1), absl::MakeSpan(index_array))
: HloInstruction::CreateDynamicSlice(
dynamic_op->shape(), dynamic_op->mutable_operand(0),
absl::MakeSpan(index_array),
dynamic_op->dynamic_slice_sizes());
TF_CHECK_OK(parent->ReplaceWithNewInstruction(dynamic_op,
std::move(new_dynamic_op)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/dynamic_index_splitter.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class DynamicIndexSplitterTest : public HloTestBase {};
TEST_F(DynamicIndexSplitterTest, DynamicSlice) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, indices), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 1)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, DynamicUpdateSlice) {
const char* const kDynamicUpdateSlice = R"(
HloModule DynamicUpdatedSlice_module
ENTRY entry (operand: s32[4,5,6], indices: s32[3], update: s32[1,1,1]) -> s32[4,5,6] {
operand = s32[4,5,6] parameter(0)
indices = s32[3] parameter(1)
update = s32[1,1,1] parameter(2)
ROOT dynamic-update-slice = s32[4,5,6] dynamic-update-slice(operand, update, indices)
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kDynamicUpdateSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
op::DynamicUpdateSlice(op::Parameter(0), op::Parameter(2),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1))),
op::Reshape(op::Slice(op::Parameter(1)))));
for (int i = 0; i < 3; ++i) {
const HloInstruction* slice = module->entry_computation()
->root_instruction()
->operand(i + 2)
->operand(0);
EXPECT_EQ(slice->slice_starts(0), i);
EXPECT_EQ(slice->slice_limits(0), i + 1);
}
}
TEST_F(DynamicIndexSplitterTest, AlreadyScalar) {
const char* const kDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY entry (operand: s32[4,5,6], index.0: s32[], index.1: s32[], index.2: s32[]) -> s32[1,1,1] {
operand = s32[4,5,6] parameter(0)
index.0 = s32[] parameter(1)
index.1 = s32[] parameter(2)
index.2 = s32[] parameter(3)
ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, index.0, index.1, index.2), dynamic_slice_sizes={1,1,1}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDynamicSlice, config));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
DynamicIndexSplitter().Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d759e3dd-49f9-433a-a301-b7317ffa4f80 | cpp | tensorflow/tensorflow | root_instruction_sinker | third_party/xla/xla/service/root_instruction_sinker.cc | third_party/xla/xla/service/root_instruction_sinker_test.cc | #include "xla/service/root_instruction_sinker.h"
#include "xla/service/tuple_util.h"
namespace xla {
namespace {
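// Sinks a tuple-shaped root: duplicates the root tuple (via fresh
// get-tuple-elements) and appends the duplicate to the end of the
// computation's schedule, making the root the last scheduled instruction.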
void SinkTupleRoot(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
CHECK(root->shape().IsTuple());
HloInstruction* new_root = TupleUtil::Duplicate(root);
HloInstructionSequence& sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
for (HloInstruction* operand : new_root->operands()) {
sequence.push_back(operand);
}
sequence.push_back(new_root);
computation->set_root_instruction(new_root);
}
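// Sinks a non-tuple root by appending a bitcast of it to the schedule.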
void SinkNontupleRoot(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
CHECK(!root->shape().IsTuple());
HloInstruction* new_root = computation->AddInstruction(
HloInstruction::CreateBitcast(root->shape(), root));
HloInstructionSequence& sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
sequence.push_back(new_root);
computation->set_root_instruction(new_root);
}
}
absl::StatusOr<bool> RootInstructionSinker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_RET_CHECK(module->has_schedule());
bool modified = false;
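  // A root only needs sinking if it is not already the last instruction in
  // its computation's schedule.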
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
HloInstructionSequence& sequence =
module->schedule().GetOrCreateSequence(computation);
if (computation->root_instruction() ==
sequence.instructions().at(sequence.size() - 1)) {
continue;
}
if (computation->root_instruction()->shape().IsTuple()) {
SinkTupleRoot(computation);
} else {
SinkNontupleRoot(computation);
}
modified = true;
}
return modified;
}
} | #include "xla/service/root_instruction_sinker.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using RootInstructionSinkerTest = HloTestBase;
TEST_F(RootInstructionSinkerTest, TupleNoChange) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto while_body =
module->entry_computation()->root_instruction()->while_body();
int num_body_instructions = while_body->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->while_body()
->instruction_count(),
num_body_instructions);
}
TEST_F(RootInstructionSinkerTest, Tuple) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto while_body =
module->entry_computation()->root_instruction()->while_body();
const auto& sequence = module->schedule().sequence(while_body);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
while_body->root_instruction());
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(op::Tuple()),
op::GetTupleElement(op::Tuple())));
}
TEST_F(RootInstructionSinkerTest, NontupleNoChange) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
int num_instructions = called_computation->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->called_computations()[0]
->instruction_count(),
num_instructions);
}
TEST_F(RootInstructionSinkerTest, Nontuple) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const auto& sequence = module->schedule().sequence(called_computation);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
called_computation->root_instruction());
EXPECT_THAT(called_computation->root_instruction(),
op::Bitcast(op::Multiply()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c54fd05-5e24-4353-98e8-4a51907c7f2c | cpp | tensorflow/tensorflow | dot_decomposer | third_party/xla/xla/service/dot_decomposer.cc | third_party/xla/xla/service/dot_decomposer_test.cc | #include "xla/service/dot_decomposer.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
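// Rewrites `original_dot` into canonical form: batch dimensions first,
// followed by at most one merged non-contracting dimension and one merged
// contracting dimension per operand. Each operand is transposed into that
// order and reshaped to merge dimensions; a final reshape restores the
// original output shape.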
absl::Status CanonicalizeDot(HloDotInstruction* original_dot) {
auto computation = original_dot->parent();
const auto& original_dnums = original_dot->dot_dimension_numbers();
const int64_t num_batch_dims = original_dnums.lhs_batch_dimensions_size();
const int64_t num_contracting_dims =
original_dnums.lhs_contracting_dimensions_size();
int lhs_sparse_dim = -1, rhs_sparse_dim = -1;
for (const SparsityDescriptor& descriptor : original_dot->sparsity()) {
(descriptor.index() == 0 ? lhs_sparse_dim : rhs_sparse_dim) =
descriptor.dimension();
}
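  // Moves the sparse dimension, if any, to the end of the dims built so far,
  // so that it remains the last contracting dimension after the transpose.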
auto move_dim_to_end = [&](std::vector<int64_t>& dims, int sparse_dim) {
if (sparse_dim < 0) return;
auto it = std::remove(dims.begin(), dims.end(), sparse_dim);
*it = sparse_dim;
};
const auto& lhs_shape = original_dot->operand(0)->shape();
const int64_t lhs_rank = lhs_shape.rank();
const int64_t num_lhs_non_contracting_dims =
lhs_rank - num_batch_dims - num_contracting_dims;
std::vector<int64_t> lhs_non_contracting_dims;
lhs_non_contracting_dims.reserve(num_lhs_non_contracting_dims);
int64_t lhs_contracting_size = 1;
bool lhs_contracting_dynamic = false;
int64_t lhs_non_contracting_size = 1;
bool lhs_non_contracting_dynamic = false;
std::vector<int64_t> batch_dim_sizes;
batch_dim_sizes.reserve(num_batch_dims);
std::vector<bool> batch_dynamic_dims;
batch_dynamic_dims.reserve(num_batch_dims);
for (int64_t i = 0; i < lhs_rank; ++i) {
if (absl::c_linear_search(original_dnums.lhs_contracting_dimensions(), i)) {
lhs_contracting_size *= lhs_shape.dimensions(i);
lhs_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
} else if (absl::c_linear_search(original_dnums.lhs_batch_dimensions(),
i)) {
batch_dim_sizes.push_back(lhs_shape.dimensions(i));
batch_dynamic_dims.push_back(lhs_shape.is_dynamic_dimension(i));
} else {
lhs_non_contracting_dims.push_back(i);
lhs_non_contracting_size *= lhs_shape.dimensions(i);
lhs_non_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
}
}
std::vector<int64_t> lhs_transpose;
lhs_transpose.reserve(lhs_rank);
lhs_transpose.insert(lhs_transpose.end(),
original_dnums.lhs_batch_dimensions().begin(),
original_dnums.lhs_batch_dimensions().end());
lhs_transpose.insert(lhs_transpose.end(), lhs_non_contracting_dims.begin(),
lhs_non_contracting_dims.end());
lhs_transpose.insert(lhs_transpose.end(),
original_dnums.lhs_contracting_dimensions().begin(),
original_dnums.lhs_contracting_dimensions().end());
move_dim_to_end(lhs_transpose, lhs_sparse_dim);
HloInstruction* lhs_operand = original_dot->mutable_operand(0);
HloInstruction* transposed_lhs = computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(lhs_transpose, lhs_shape), lhs_operand,
lhs_transpose),
&lhs_operand->metadata());
std::vector<int64_t> lhs_reshape_dims = batch_dim_sizes;
std::vector<bool> lhs_reshape_dynamic_dims = batch_dynamic_dims;
if (lhs_non_contracting_size > 1) {
lhs_reshape_dims.push_back(lhs_non_contracting_size);
lhs_reshape_dynamic_dims.push_back(lhs_non_contracting_dynamic);
}
lhs_reshape_dims.push_back(lhs_contracting_size);
lhs_reshape_dynamic_dims.push_back(lhs_contracting_dynamic);
HloInstruction* reshaped_lhs = computation->AddInstruction(
HloInstruction::CreateReshape(
ShapeUtil::MakeShape(lhs_shape.element_type(), lhs_reshape_dims,
lhs_reshape_dynamic_dims),
transposed_lhs),
&transposed_lhs->metadata());
const auto& rhs_shape = original_dot->operand(1)->shape();
const int64_t rhs_rank = rhs_shape.rank();
const int64_t num_rhs_non_contracting_dims =
rhs_rank - num_batch_dims - num_contracting_dims;
std::vector<int64_t> rhs_non_contracting_dims;
rhs_non_contracting_dims.reserve(num_rhs_non_contracting_dims);
int64_t rhs_non_contracting_size = 1;
bool rhs_non_contracting_dynamic = false;
int64_t rhs_contracting_size = 1;
bool rhs_contracting_dynamic = false;
for (int64_t i = 0; i < rhs_rank; ++i) {
if (absl::c_linear_search(original_dnums.rhs_contracting_dimensions(), i)) {
rhs_contracting_size *= rhs_shape.dimensions(i);
rhs_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
} else if (!absl::c_linear_search(original_dnums.rhs_batch_dimensions(),
i)) {
rhs_non_contracting_dims.push_back(i);
rhs_non_contracting_size *= rhs_shape.dimensions(i);
rhs_non_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
}
}
std::vector<int64_t> rhs_transpose;
rhs_transpose.reserve(rhs_rank);
rhs_transpose.insert(rhs_transpose.end(),
original_dnums.rhs_batch_dimensions().begin(),
original_dnums.rhs_batch_dimensions().end());
rhs_transpose.insert(rhs_transpose.end(),
original_dnums.rhs_contracting_dimensions().begin(),
original_dnums.rhs_contracting_dimensions().end());
move_dim_to_end(rhs_transpose, rhs_sparse_dim);
rhs_transpose.insert(rhs_transpose.end(), rhs_non_contracting_dims.begin(),
rhs_non_contracting_dims.end());
HloInstruction* rhs_operand = original_dot->mutable_operand(1);
HloInstruction* transposed_rhs = computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(rhs_transpose, rhs_shape), rhs_operand,
rhs_transpose),
&rhs_operand->metadata());
std::vector<int64_t> rhs_reshape_dims = batch_dim_sizes;
rhs_reshape_dims.push_back(rhs_contracting_size);
std::vector<bool> rhs_reshape_dynamic_dims = batch_dynamic_dims;
rhs_reshape_dynamic_dims.push_back(rhs_contracting_dynamic);
if (rhs_non_contracting_size > 1) {
rhs_reshape_dims.push_back(rhs_non_contracting_size);
rhs_reshape_dynamic_dims.push_back(rhs_non_contracting_dynamic);
}
HloInstruction* reshaped_rhs = computation->AddInstruction(
HloInstruction::CreateReshape(
ShapeUtil::MakeShape(rhs_shape.element_type(), rhs_reshape_dims,
rhs_reshape_dynamic_dims),
transposed_rhs),
&transposed_rhs->metadata());
std::vector<int64_t> dot_dims = batch_dim_sizes;
std::vector<bool> dot_dynamic_dims = batch_dynamic_dims;
if (lhs_non_contracting_size > 1) {
dot_dims.push_back(lhs_non_contracting_size);
dot_dynamic_dims.push_back(lhs_non_contracting_dynamic);
}
if (rhs_non_contracting_size > 1) {
dot_dims.push_back(rhs_non_contracting_size);
dot_dynamic_dims.push_back(rhs_non_contracting_dynamic);
}
DotDimensionNumbers dot_dnums;
for (int64_t i = 0; i < num_batch_dims; ++i) {
dot_dnums.add_lhs_batch_dimensions(i);
dot_dnums.add_rhs_batch_dimensions(i);
}
dot_dnums.add_lhs_contracting_dimensions(
num_batch_dims + (lhs_non_contracting_size > 1 ? 1 : 0));
dot_dnums.add_rhs_contracting_dimensions(num_batch_dims);
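  // Rebuild the sparsity descriptors and metadata operands to match the
  // canonical operand order: transpose each metadata operand like its data
  // operand, then reshape it to the inferred metadata shape.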
std::vector<SparsityDescriptor> sparsity;
std::vector<HloInstruction*> sparse_meta;
sparsity.reserve(original_dot->sparse_operands());
sparse_meta.reserve(original_dot->sparse_operands());
auto transpose_meta = [&](HloInstruction* original_meta,
absl::Span<const int64_t> transpose) {
return computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(transpose, original_meta->shape()),
original_meta, transpose),
&original_meta->metadata());
};
for (int i = 0; i < original_dot->sparse_operands(); ++i) {
SparsityDescriptor descriptor = original_dot->sparsity()[i];
descriptor.set_dimension(num_batch_dims + (descriptor.index() == 0 &&
lhs_non_contracting_size > 1));
sparsity.push_back(descriptor);
HloInstruction* meta =
original_dot->mutable_operand(HloDotInstruction::kOperands + i);
HloInstruction* meta_operand;
if (descriptor.index() == 0) {
meta = transpose_meta(meta, lhs_transpose);
meta_operand = reshaped_lhs;
} else {
meta = transpose_meta(meta, rhs_transpose);
meta_operand = reshaped_rhs;
}
TF_ASSIGN_OR_RETURN(Shape result_shape,
ShapeInference::InferSparseDotMetadataShape(
meta_operand->shape(), dot_dnums, descriptor));
meta = computation->AddInstruction(
HloInstruction::CreateReshape(result_shape, meta), &meta->metadata());
sparse_meta.push_back(meta);
}
HloInstruction* dot = computation->AddInstruction(HloInstruction::CreateDot(
ShapeUtil::MakeShape(original_dot->shape().element_type(), dot_dims,
dot_dynamic_dims),
reshaped_lhs, reshaped_rhs, dot_dnums, original_dot->precision_config(),
sparsity, sparse_meta));
original_dot->SetupDerivedInstruction(dot);
std::unique_ptr<HloInstruction> replacement =
HloInstruction::CreateReshape(original_dot->shape(), dot);
VLOG(3) << "Canonicalizing dot:\n"
<< "\t old: " << original_dot->ToString() << "\n"
<< "\t new: " << dot->ToString() << "\n"
<< "\t -> " << replacement->ToString();
return computation->ReplaceWithNewInstruction(original_dot,
std::move(replacement));
}
}
absl::StatusOr<bool> DotDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> non_canonical_dots;
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kDot) {
continue;
}
const DotDimensionNumbers& dnums = instruction->dot_dimension_numbers();
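      // A dot is canonical when each operand has exactly one contracting
      // dimension, at most one non-contracting dimension, and batch
      // dimensions occupying the leading positions 0..n-1 on both sides.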
if (dnums.lhs_contracting_dimensions_size() != 1) {
non_canonical_dots.push_back(instruction);
continue;
}
if (dnums.lhs_batch_dimensions_size() + 2 <
instruction->operand(0)->shape().rank() ||
dnums.rhs_batch_dimensions_size() + 2 <
instruction->operand(1)->shape().rank()) {
non_canonical_dots.push_back(instruction);
continue;
}
if (dnums.lhs_batch_dimensions().empty() &&
dnums.lhs_contracting_dimensions().empty()) {
non_canonical_dots.push_back(instruction);
continue;
}
std::vector<int64_t> canonical_batch_dims(
dnums.lhs_batch_dimensions_size());
absl::c_iota(canonical_batch_dims, 0);
if (!absl::c_equal(dnums.lhs_batch_dimensions(), canonical_batch_dims) ||
!absl::c_equal(dnums.rhs_batch_dimensions(), canonical_batch_dims)) {
non_canonical_dots.push_back(instruction);
}
}
}
bool changed = false;
for (auto* dot : non_canonical_dots) {
TF_RETURN_IF_ERROR(CanonicalizeDot(Cast<HloDotInstruction>(dot)));
changed = true;
}
return changed;
}
} | #include "xla/service/dot_decomposer.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = ::xla::testing::opcode_matchers;
using DotDecomposerTest = HloTestBase;
TEST_F(DotDecomposerTest, CanonicalizeMultipleNonContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,63,512]{2,1,0} parameter(0)
p1 = f32[512,512]{1,0} parameter(1)
ROOT dot = f32[64,63,512]{2,1,0} dot(p0, p1), lhs_contracting_dims={2},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/1,
                                        /*rhs_contracting_dim=*/0),
op::Shape("f32[4032,512]"))));
}
TEST_F(DotDecomposerTest, DontCanonicalizeIfNoNoncontractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64]{0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_FALSE(canonicalized);
}
TEST_F(DotDecomposerTest, DontAddLhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4,2,1]{3,2,1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/1,
                                        /*rhs_contracting_dim=*/1),
op::Shape("f32[64,2]"))));
}
TEST_F(DotDecomposerTest, DontAddRhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4,2,1]{3,2,1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/2,
                                        /*rhs_contracting_dim=*/1),
op::Shape("f32[64,2]"))));
}
template <typename Arg0, typename Arg1, typename Arg2>
auto SparseDotMatcher(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) {
return match::Op()
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, std::forward<Arg0>(arg0))
.WithOperand(1, std::forward<Arg1>(arg1))
.WithOperand(2, std::forward<Arg2>(arg2));
}
TEST_F(DotDecomposerTest, CanonicalizeSparseLhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[16,4,3,7] parameter(0)
rhs = f32[32,4,5,7] parameter(1)
meta = u16[2,4,3,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=L.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 0);
EXPECT_EQ(descriptor.dimension(), 2);
}
TEST_F(DotDecomposerTest, CanonicalizeSparseRhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[32,4,3,7] parameter(0)
rhs = f32[16,4,5,7] parameter(1)
meta = u16[2,4,5,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=R.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 1);
EXPECT_EQ(descriptor.dimension(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a31a24b-fd06-41cc-bf29-c12ce667dce2 | cpp | tensorflow/tensorflow | hlo_constant_folding | third_party/xla/xla/service/hlo_constant_folding.cc | third_party/xla/xla/service/hlo_constant_folding_test.cc | #include "xla/service/hlo_constant_folding.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
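// Returns true if the instruction, or any instruction inside a computation
// it calls, must never be constant-folded (after-all tokens and RNGs).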
static bool IsOrContainsIllegalInstr(const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kAfterAll ||
instr->opcode() == HloOpcode::kRng) {
return true;
}
for (const HloComputation* c : instr->called_computations()) {
if (absl::c_any_of(c->instructions(), IsOrContainsIllegalInstr)) {
return true;
}
}
return false;
}
std::atomic<int64_t> HloConstantFolding::slow_op_counter_{0};
absl::StatusOr<bool> HloConstantFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
  auto evaluator = std::make_unique<HloEvaluator>(/*max_loop_iterations=*/0);
evaluator->set_use_fast_path(true);
std::vector<HloInstruction*> dead_instructions;
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->IsDead()) {
continue;
}
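      // Only attempt folding when at least one operand is a constant and
      // every operand is either a constant or a broadcast of a constant.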
if (!absl::c_any_of(instruction->operands(),
HloPredicateIsOp<HloOpcode::kConstant>) ||
!absl::c_all_of(
instruction->operands(), [](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kConstant ||
(operand->opcode() == HloOpcode::kBroadcast &&
operand->operand(0)->opcode() == HloOpcode::kConstant);
})) {
continue;
}
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->opcode() == HloOpcode::kConstant ||
instruction->opcode() == HloOpcode::kTuple) {
continue;
}
if (instruction->opcode() == HloOpcode::kBroadcast ||
instruction->opcode() == HloOpcode::kIota) {
continue;
}
if (instruction->IsAsynchronous() &&
instruction->async_execution_thread() !=
instruction->parent()->execution_thread()) {
continue;
}
if (instruction->opcode() == HloOpcode::kFft) {
continue;
}
if (IsOrContainsIllegalInstr(instruction)) {
continue;
}
if (instruction->HasSideEffect()) {
continue;
}
if (instruction->opcode() == HloOpcode::kPad &&
instruction->operand(0)->opcode() == HloOpcode::kBroadcast &&
instruction->operand(1)->opcode() == HloOpcode::kConstant) {
continue;
}
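      // Skip folding when the result or its operands would be a very large
      // constant; the compile-time and memory cost outweighs the benefit.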
if (instruction->shape().IsArray()) {
int64_t elements_in_operands = 0;
for (HloInstruction* operand : instruction->operands()) {
if (operand->shape().IsArray()) {
elements_in_operands += ShapeUtil::ElementsIn(operand->shape());
}
}
int64_t elements_in_constant =
ShapeUtil::ElementsIn(instruction->shape());
static const int64_t kMaximumConstantSizeElements = 45 * 1000 * 1000;
if (std::max(elements_in_constant, elements_in_operands) >
kMaximumConstantSizeElements) {
VLOG(2) << "Ignore constant folding: result shape size is "
<< elements_in_constant << " total size of arguments is "
<< elements_in_operands;
continue;
}
}
VLOG(5) << "Constant folding: " << instruction->ToString();
absl::Duration slow_timeout =
absl::Seconds(uint64_t{1} << slow_op_counter_.load());
SlowOperationAlarm slow_alarm(slow_timeout, [instruction, slow_timeout] {
const bool ndebug =
#if NDEBUG
true;
#else
false;
#endif
absl::string_view explanation_msg =
ndebug
? "This isn't necessarily a bug; constant-folding is "
"inherently a trade-off between compilation time and speed "
"at runtime. XLA has some guards that attempt to keep "
"constant folding from taking too long, but fundamentally "
"you'll always be able to come up with an input program that "
"takes a long time.\n\n"
"If you'd like to file a bug, run with envvar "
"XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results."
: "XLA was built without compiler optimizations, which can be "
"slow. Try rebuilding with -c opt.";
return absl::StrFormat(
"Constant folding an instruction is taking > %s:\n\n"
" %s\n\n"
"%s",
absl::FormatDuration(slow_timeout), instruction->ToString(),
explanation_msg);
});
Literal result;
if (!evaluator->TryEvaluate(
instruction, &result,
              /*recursively_evaluate_nonconstant_operands=*/true)) {
VLOG(2) << "Constant folding failed for instruction: "
<< instruction->ToString();
continue;
}
slow_alarm.cancel();
if (slow_alarm.fired()) {
slow_op_counter_++;
}
VLOG(4) << "Constant folded: " << instruction->ToString();
dead_instructions.push_back(instruction);
HloInstruction* new_constant = instruction->AddInstruction(
HloInstruction::CreateConstant(std::move(result)));
if (new_constant->shape().has_layout()) {
new_constant->mutable_shape()
->mutable_layout()
->set_element_size_in_bits(
instruction->shape().layout().element_size_in_bits());
}
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_constant));
}
}
const bool changed = !dead_instructions.empty();
for (HloInstruction* dead_instruction : dead_instructions) {
CHECK(dead_instruction->IsDead());
HloComputation* computation = dead_instruction->parent();
TF_RETURN_IF_ERROR(computation->RemoveInstruction(dead_instruction));
}
return changed;
}
} | #include "xla/service/hlo_constant_folding.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;
using HloConstantFoldingTest = HloTestBase;
TEST_F(HloConstantFoldingTest, ConvertF32ToS64) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(
computation->root_instruction()->literal().GetFirstElement<int64_t>(),
42);
}
TEST_F(HloConstantFoldingTest, ConvertS64ToF32) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42)));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
42.0f);
}
TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {
HloComputation::Builder builder(TestName());
HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({42.0f, 19.0f})));
builder.AddInstruction(
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({0}), 42);
EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({1}), 19);
}
TEST_F(HloConstantFoldingTest, Concatenate) {
const struct TestConfig {
int concat_dimension;
std::vector<int64_t> dimensions;
std::vector<int64_t> concat_sizes;
} test_configs[] = {
{1, {11, 0, 7, 5, 9}, {2, 5, 7, 11}},
{3, {1, 4, 17, 0, 8}, {1, 3, 9, 12}},
};
for (auto& test_config : test_configs) {
HloComputation::Builder builder(TestName());
std::vector<int64_t> dimensions(test_config.dimensions.begin(),
test_config.dimensions.end());
int64_t concat_size = 0;
std::vector<HloInstruction*> operands;
for (auto csize : test_config.concat_sizes) {
dimensions[test_config.concat_dimension] = csize;
concat_size += csize;
auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions);
HloInstruction* insn = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
operands.push_back(insn);
}
dimensions[test_config.concat_dimension] = concat_size;
Shape shape = ShapeUtil::MakeShape(F32, dimensions);
builder.AddInstruction(HloInstruction::CreateConcatenate(
shape, operands, test_config.concat_dimension));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
}
}
TEST_F(HloConstantFoldingTest, Slice) {
HloComputation::Builder builder(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
const int64_t slice_start[] = {4, 2, 3, 1, 5};
const int64_t slice_limits[] = {10, 8, 6, 5, 9};
const int64_t slice_strides[] = {1, 1, 1, 1, 1};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
HloInstruction* literal_instruction = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {6, 6, 3, 4, 4});
builder.AddInstruction(HloInstruction::CreateSlice(
shape, literal_instruction, slice_start, slice_limits, slice_strides));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
}
TEST_F(HloConstantFoldingTest, TransposeConstantFold) {
HloComputation::Builder builder(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal.Clone();
HloInstruction* literal_instruction = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
const int64_t permutation[] = {1, 2, 0, 4, 3};
builder.AddInstruction(
HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_TRUE(result);
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Constant()));
EXPECT_TRUE(ShapeUtil::Compatible(root->shape(), shape));
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
bool matched = true;
root->literal().EachCell<NativeT>(
[&](absl::Span<const int64_t> indices, NativeT value) {
std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
matched = matched && (value == literal_clone.Get<NativeT>(rindexes));
});
EXPECT_TRUE(matched);
}
const char* const kConstantFoldReduce = R"(
HloModule ConstantFoldReduce
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = s32[] add(a, b)
}
ENTRY r {
x = s32[3] constant({1, 2, 3})
init = s32[] constant(0)
ROOT reduce = s32[] reduce(x, init), dimensions={0}, to_apply=add
})";
TEST_F(HloConstantFoldingTest, ConstantFoldReduce) {
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kConstantFoldReduce));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_TRUE(result);
EXPECT_EQ(6, m->entry_computation()
->root_instruction()
->literal()
.GetFirstElement<int32_t>());
}
constexpr absl::string_view kConstantFoldReduceWithMetadata = R"(
HloModule ConstantFoldReduce
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = s32[] add(a, b)
}
ENTRY r {
x = s32[3] constant({1, 2, 3}), metadata={op_name="constant"}
init = s32[] constant(0), metadata={op_name="zero_constant"}
ROOT reduce = s32[] reduce(x, init), metadata={op_name="reduce"}, dimensions={0}, to_apply=add
})";
TEST_F(HloConstantFoldingTest, ConstantFoldReduceCheckMetadata) {
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(kConstantFoldReduceWithMetadata));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_TRUE(result);
OpMetadata reduce_metadata;
reduce_metadata.set_op_name("reduce");
EXPECT_THAT(m->entry_computation()->root_instruction(),
AllOf(op::Constant(), op::Metadata(reduce_metadata)));
}
TEST_F(HloConstantFoldingTest, ConstantFoldReduceNoLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kConstantFoldReduce));
HloInstruction* add = (*m->computations().begin())->root_instruction();
LayoutUtil::ClearLayout(add->mutable_shape());
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
EXPECT_FALSE(result);
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reduce()));
}
const char* const kConstantFoldLargePad = R"(
HloModule ConstantFoldLargePad
ENTRY r {
a = f32[1,1,1] constant({{{7}}})
b = f32[] constant(42)
ROOT pad = f32[2048,2048,128] pad(a, b), padding=1024_1023x1024_1023x64_63
})";
TEST_F(HloConstantFoldingTest, DoesNotFoldLargePad) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kConstantFoldLargePad));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_FALSE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Constant(), m::Constant())));
}
TEST_F(HloConstantFoldingTest, DoesNotFoldPadBroadcast) {
const char* const kConstantFoldPadBroadcast = R"(
HloModule ConstantFoldLargePad
ENTRY r {
a = f32[] constant(239)
broadcast_a = f32[4] broadcast(a), dimensions={}
b = f32[] constant(42)
ROOT pad = f32[8] pad(f32[4] broadcast_a, f32[] b), padding=4_0
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kConstantFoldPadBroadcast));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_FALSE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Broadcast(), m::Constant())));
}
TEST_F(HloConstantFoldingTest, DoesNotFoldSlicesWithLargeOperand) {
const char* const kModuleStr = R"(
HloModule test
ENTRY r {
a = f32[] constant(42)
broadcast = f32[1000000000]{0} broadcast(a), dimensions={}
slice1 = f32[10000]{0} slice(broadcast), slice={[0:10000]}
slice2 = f32[10000]{0} slice(broadcast), slice={[10000:20000]}
ROOT add = f32[10000]{0} add(slice1, slice2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding const_folder;
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
EXPECT_FALSE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Slice(), m::Slice())));
}
TEST_F(HloConstantFoldingTest, DontFoldSubcomputationContainingAfterAll) {
const char* const kModuleStr = R"(
HloModule test
Fn {
tok = token[] after-all()
ROOT root = f32[10] iota(), iota_dimension=0
}
ENTRY entry {
ROOT call = f32[10] call(), to_apply=Fn
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloConstantFoldingTest,
DontFoldSubcomputationTransitivelyContainingRng) {
const char* const kModuleStr = R"(
HloModule test
InnerFn {
c0 = f32[] constant(0)
c1 = f32[] constant(1)
ROOT rng = f32[10] rng(c0, c1), distribution=rng_uniform
}
Fn {
ROOT fusion = f32[10] fusion(), kind=kLoop, calls=InnerFn
}
ENTRY entry {
ROOT call = f32[10] call(), to_apply=Fn
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloConstantFoldingTest, FoldOpsWhereOneOperandIsBroadcast) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
not_folded1 = f32[4] broadcast(f32[] constant(1))
not_folded2 = add(f32[4] broadcast(f32[] constant(2)),
f32[4] broadcast(f32[] constant(3)))
folded1 = add(f32[4] broadcast(f32[] constant(5)),
f32[4] constant({0,1,2,3}))
folded2 = add(f32[4] constant({0,1,2,3}),
f32[4] broadcast(f32[] constant(5)))
ROOT root = tuple(not_folded1, not_folded2, folded1, folded2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Broadcast(m::Constant()),
m::Add(m::Broadcast(m::Constant()),
m::Broadcast(m::Constant())),
m::Constant(),
m::Constant()
)));
}
TEST_F(HloConstantFoldingTest, FoldInt4Ops) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
c0 = s4[2]{0:E(4)} constant({1, 2})
c1 = s4[2]{0:E(4)} constant({3, 4})
add1 = s4[2]{0:E(4)} add(c0, c1)
c2 = s4[]{:E(4)} constant(5)
add2 = s4[2]{0:E(4)} add(c0, s4[2]{0:E(4)} broadcast(c2))
ROOT root = tuple(add1, add2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
auto is_4_bit = [](const HloInstruction* instr) {
return instr->shape().layout().element_size_in_bits() == 4;
};
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Constant().WithPredicate(is_4_bit),
m::Constant().WithPredicate(is_4_bit))));
}
TEST_F(HloConstantFoldingTest, BigReduceWindow) {
constexpr absl::string_view kModuleStr = R"(
HloModule test
add_bf16 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY accumulated_all_reduce {
x = bf16[160,10,10,512]{3,2,1,0} broadcast(bf16[] constant(1.0))
init = bf16[] constant(0)
ROOT reduce-window = reduce-window(x, init), window={size=1x2x2x1 stride=1x2x2x1}, to_apply=add_bf16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
HloConstantFolding constant_folding;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_folding, module.get()));
EXPECT_TRUE(result);
}
TEST_F(HloConstantFoldingTest, TimingConsumingTest) {
constexpr absl::string_view mod_str = R"(
HloModule jit_f, entry_computation_layout={()->f32[]}
region_0.4 {
Arg_0.5 = f32[] parameter(0)
Arg_1.6 = f32[] parameter(1)
ROOT add.7 = f32[] add(Arg_0.5, Arg_1.6)
}
ENTRY main.9 {
constant.1 = f32[] constant(1)
broadcast.2 = f32[32,999,40,512]{3,2,1,0} broadcast(constant.1), dimensions={}
constant.3 = f32[] constant(0)
ROOT reduce.8 = f32[] reduce(broadcast.2, constant.3), dimensions={0,1,2,3}, to_apply=region_0.4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(mod_str));
HloConstantFolding const_fold;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&const_fold, module.get()));
EXPECT_FALSE(result);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
39ece867-becf-40a5-be85-a8bfb9996b0b | cpp | tensorflow/tensorflow | all_reduce_reassociate | third_party/xla/xla/service/all_reduce_reassociate.cc | third_party/xla/xla/service/all_reduce_reassociate_test.cc | #include "xla/service/all_reduce_reassociate.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
namespace m = match;
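// Compares two AllReduceKeys. With ignore_element_type set, the element-type
// field (tuple index 1) is excluded, so all-reduces that differ only by a
// value-preserving convert can still be merged.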
bool AreAllreduceKeysEqual(AllReduceKey& key0, AllReduceKey& key1,
bool ignore_element_type) {
if (ignore_element_type) {
return std::get<0>(key0) == std::get<0>(key1) &&
std::get<2>(key0) == std::get<2>(key1) &&
std::get<3>(key0) == std::get<3>(key1) &&
std::get<4>(key0) == std::get<4>(key1) &&
std::get<5>(key0) == std::get<5>(key1);
} else {
return key0 == key1;
}
}
bool AreCompatible(const HloAllReduceInstruction* ar0,
const HloAllReduceInstruction* ar1, ReductionKind op_kind,
bool ignore_element_type) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(ar0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(ar1);
auto kind0 = MatchReductionComputation(ar0->to_apply());
return key0 && key1 && kind0 &&
AreAllreduceKeysEqual(*key0, *key1, ignore_element_type) &&
kind0 == op_kind;
}
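// Walks up a single-use chain of reshape/pad/slice/convert to the feeding
// all-reduce. Pads must pad with the reduction identity so reassociating
// across them is sound. A dynamic-slice over an all-reduce is returned as-is
// to support the reduce-scatter-style pattern.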
HloInstruction* LookThroughForAllReduce(HloInstruction* instr,
const Literal& reduction_identity) {
if (instr->opcode() == HloOpcode::kDynamicSlice) {
if (instr->operand(0)->opcode() != HloOpcode::kAllReduce ||
instr->operand(0)->user_count() != 1 || instr->user_count() != 1) {
return nullptr;
}
return instr;
}
while (instr->opcode() != HloOpcode::kAllReduce) {
if (instr->user_count() != 1) {
return nullptr;
}
if (instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kPad &&
instr->opcode() != HloOpcode::kSlice &&
instr->opcode() != HloOpcode::kConvert) {
return nullptr;
}
if (instr->opcode() == HloOpcode::kPad) {
if (!instr->operand(1)->IsConstant()) {
return nullptr;
}
if (instr->operand(1)->literal() != reduction_identity) {
return nullptr;
}
}
instr = instr->mutable_operand(0);
}
if (instr->user_count() != 1) {
return nullptr;
}
return instr;
}
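// Reassociating is profitable only when the fused all-reduce moves no more
// elements than the two original all-reduces combined.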
bool ReassociateAllReduceIsProfitable(HloInstruction* ar0, HloInstruction* ar1,
HloInstruction* reassociated_inst) {
int64_t pre_reassociated_size = ShapeUtil::ElementsIn(ar0->shape());
if (ar0 != ar1) {
pre_reassociated_size += ShapeUtil::ElementsIn(ar1->shape());
}
return pre_reassociated_size >=
ShapeUtil::ElementsIn(reassociated_inst->shape());
}
bool AreCompatibleConverts(const HloInstruction* convert0,
const HloInstruction* convert1) {
bool is_compatible = true;
if (convert0) {
is_compatible &= primitive_util::CastPreservesValues(
convert0->operand(0)->shape().element_type(),
convert0->shape().element_type());
}
if (convert1) {
is_compatible &= primitive_util::CastPreservesValues(
convert1->operand(0)->shape().element_type(),
convert1->shape().element_type());
}
if (convert0 && convert1) {
    CHECK_EQ(convert0->shape().element_type(),
             convert1->shape().element_type());
is_compatible &= convert0->operand(0)->shape().element_type() ==
convert1->operand(0)->shape().element_type();
}
return is_compatible;
}
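// Matches `pattern` either directly or wrapped in a single-user convert,
// capturing the convert in *optional_convert when present.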
template <typename Pattern>
auto OptionalConvertWithOneUser(HloInstruction** optional_convert,
Pattern pattern) {
return m::AnyOf<HloInstruction>(
m::Convert(optional_convert, pattern).WithOneUser(), std::move(pattern));
}
bool MatchOperandsToAllReduceWithOptionalConvert(HloInstruction* inst,
HloInstruction** convert0,
HloInstruction** convert1) {
auto ar_op_optional_convert_pattern =
m::Op()
.WithOperand(0, OptionalConvertWithOneUser(convert0, m::AllReduce()))
.WithOperand(1, OptionalConvertWithOneUser(convert1, m::AllReduce()))
.WithPredicate([](const HloInstruction* inst) {
return inst->shape().IsArray();
});
return Match(inst, ar_op_optional_convert_pattern);
}
}
absl::StatusOr<bool> AllReduceReassociate::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceReassociate because the module contains all-reduce "
"with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
if (!kind) {
continue;
}
std::optional<Literal> reduction_identity =
GetReductionIdentity(*kind, inst->shape().element_type());
if (!reduction_identity) {
continue;
}
HloInstruction* lhs = LookThroughForAllReduce(inst->mutable_operand(0),
*reduction_identity);
if (lhs == nullptr) {
continue;
}
HloInstruction* rhs = LookThroughForAllReduce(inst->mutable_operand(1),
*reduction_identity);
if (rhs == nullptr) {
continue;
}
if (!inst->shape().IsArray()) {
continue;
}
if (lhs->opcode() != rhs->opcode() ||
(lhs->opcode() == HloOpcode::kDynamicSlice &&
!ShapeUtil::Compatible(lhs->operand(0)->shape(),
rhs->operand(0)->shape()))) {
continue;
}
HloAllReduceInstruction* ar0 = nullptr;
HloAllReduceInstruction* ar1 = nullptr;
bool reduce_scatter_pattern_match = false;
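      // Reduce-scatter pattern: the two dynamic-slices must be identical
      // apart from the all-reduce they slice. Temporarily point rhs at lhs's
      // all-reduce so Identical() compares everything else, then restore the
      // original operand.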
if (lhs->opcode() == HloOpcode::kDynamicSlice) {
HloInstruction* original_rhs_operand = rhs->mutable_operand(0);
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, lhs->mutable_operand(0)));
if (!lhs->Identical(*rhs)) {
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
continue;
}
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
ar0 = Cast<HloAllReduceInstruction>(lhs->mutable_operand(0));
ar1 = Cast<HloAllReduceInstruction>(rhs->mutable_operand(0));
reduce_scatter_pattern_match = true;
} else {
ar0 = Cast<HloAllReduceInstruction>(lhs);
ar1 = Cast<HloAllReduceInstruction>(rhs);
}
if (!ReassociateAllReduceIsProfitable(lhs, rhs, inst)) {
continue;
}
HloInstruction* convert0 = nullptr;
HloInstruction* convert1 = nullptr;
if (!MatchOperandsToAllReduceWithOptionalConvert(inst, &convert0,
&convert1)) {
VLOG(2) << "One or both inputs are type-converted.";
}
bool should_promote_ar = convert0 || convert1;
if (should_promote_ar) {
if (!reassociate_converted_ar_) {
VLOG(2) << "Promotions of all_reduces for reassociation will be "
"disabled.";
continue;
}
if (!AreCompatibleConverts(convert0, convert1)) {
VLOG(2) << "Inputs' Converts are not preserving "
"value, skipping";
continue;
}
}
HloInstruction* op_operand0 = inst->mutable_operand(0);
HloInstruction* op_operand1 = inst->mutable_operand(1);
if (convert0) {
op_operand0 = convert0->mutable_operand(0);
}
if (convert1) {
op_operand1 = convert1->mutable_operand(0);
}
      if (!AreCompatible(ar0, ar1, *kind,
                         /*ignore_element_type=*/should_promote_ar)) {
VLOG(2) << "All-Reduce operations are not compatible, skipping";
continue;
}
VLOG(2) << "Reassociated:";
VLOG(2) << "\tAR0: " << ar0->ToString();
VLOG(2) << "\tAR1: " << ar1->ToString();
auto op_users = inst->users();
HloInstruction* new_op_operand0 = ar0->mutable_operand(0);
HloInstruction* new_op_operand1 = ar1->mutable_operand(0);
if (convert0) {
HloInstruction* ar0_operand = ar0->mutable_operand(0);
TF_RETURN_IF_ERROR(convert0->ReplaceOperandWith(0, ar0_operand));
new_op_operand0 = convert0;
}
if (convert1) {
HloInstruction* ar1_operand = ar1->mutable_operand(0);
TF_RETURN_IF_ERROR(convert1->ReplaceOperandWith(0, ar1_operand));
new_op_operand1 = convert1;
}
HloInstruction* new_op = inst;
if (should_promote_ar) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
inst->shape(), {new_op_operand0, new_op_operand1}));
} else if (reduce_scatter_pattern_match) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
ar0->shape(), {new_op_operand0, new_op_operand1}));
}
Shape new_ar_out_shape = inst->shape();
CHECK(!should_promote_ar || !reduce_scatter_pattern_match);
if (should_promote_ar) {
new_ar_out_shape.set_element_type(
new_op_operand0->shape().element_type());
} else if (reduce_scatter_pattern_match) {
new_ar_out_shape = ar0->shape();
} else {
TF_RETURN_IF_ERROR(ar0->ReplaceAllUsesWith(ar0->mutable_operand(0)));
TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(ar1->mutable_operand(0)));
}
HloInstruction* new_ar = computation->AddInstruction(
ar0->CloneWithNewOperands(new_ar_out_shape, {new_op}));
if (new_ar->channel_id()) {
new_ar->set_channel_id(next_channel_id++);
}
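      // If converts were folded in, rebuild the reduction computation at the
      // promoted element type so the new all-reduce reduces in the wider
      // type.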
if (should_promote_ar) {
HloComputation* to_apply = new_ar->to_apply();
PrimitiveType type = new_ar->shape().element_type();
std::string name = absl::StrCat(to_apply->name(), "_reassoc_promoted");
HloComputation::Builder promoted(name);
auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
promoted.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}),
to_apply->root_instruction()->opcode(), x, y));
HloComputation* to_apply_promoted =
inst->GetModule()->AddEmbeddedComputation(promoted.Build());
new_ar->set_to_apply(to_apply_promoted);
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_ar));
} else if (reduce_scatter_pattern_match) {
auto dyn_slice_operands = lhs->mutable_operands();
dyn_slice_operands[0] = new_ar;
HloInstruction* new_dyn_slice = inst->parent()->AddInstruction(
lhs->CloneWithNewOperands(inst->shape(), dyn_slice_operands));
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_dyn_slice));
} else {
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_ar));
}
if (should_promote_ar || reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
}
if (reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(lhs));
if (lhs != rhs) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rhs));
}
}
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
if (ar0 != ar1) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
}
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_reduce_reassociate.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
using ::testing::_;
class AllReduceSimplifierTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change,
bool reassociate_converted_ar = false) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed =
AllReduceReassociate(reassociate_converted_ar).Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule>& module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
};
TEST_F(AllReduceSimplifierTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleWithChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(add0, ar2)
ROOT add2 = f32[8] add(add1, ar3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(
m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
m::Parameter(3))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleTree) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(ar2, ar3)
ROOT add2 = f32[8] add(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Add(m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, MismatchOp0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchOp1) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=max
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0}}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchHasChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, channel_id=3, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchUseGlobalDeviceId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0, 1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={{0, 1}}, channel_id=4, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, NotSingleUser) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar1)
ROOT t = (f32[8], f32[8]) tuple(ar0, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, DoubleUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar0)
ROOT c = f32[8] copy(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
}
TEST_F(AllReduceSimplifierTest, PaddedUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Pad(m::Parameter(0), _),
m::Pad(m::Parameter(1), _))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, PaddedUseInvalidReduceValue) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(-1.0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[17]{0} pad(ar0, constant.1), padding=0_9
pad.1 = f32[17]{0} pad(ar1, constant.1), padding=0_9
ROOT add = f32[17] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseDoubleUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[9]{0} pad(ar0, constant.1), padding=0_1
ROOT add = f32[9] add(pad, pad)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ReshapeUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[8]{0} reshape(ar0)
rshp1 = f32[8]{0} reshape(ar1)
ROOT add = f32[8] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Reshape(m::Parameter(0)),
m::Reshape(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SliceUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[4]{0} slice(ar0), slice={[0:4]}
rshp1 = f32[4]{0} slice(ar1), slice={[0:4]}
ROOT add = f32[4] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Slice(m::Parameter(0)),
m::Slice(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ChainWithConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*expect_change=*/true,
                                  /*reassociate_converted_ar=*/true));
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Convert(m::AllReduce(m::Add(m::Add(m::Add(m::Convert(m::Parameter(0)),
m::Convert(m::Parameter(1))),
m::Convert(m::Parameter(2))),
m::Convert(m::Parameter(3))))));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(0)->shape(),
GmockMatch(::xla::match::Shape().WithElementType(F32)));
}
TEST_F(AllReduceSimplifierTest, AllreduceWithConvertIncompatibleType) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
max.1 {
x.48 = bf16[] parameter(0)
y.48 = bf16[] parameter(1)
ROOT max.2533 = bf16[] maximum(x.48, y.48)
}
min.1 {
x.49 = bf16[] parameter(0)
y.49 = bf16[] parameter(1)
ROOT min.2534 = bf16[] minimum(x.49, y.49)
}
mul.1 {
x.50 = bf16[] parameter(0)
y.50 = bf16[] parameter(1)
ROOT mul.2535 = bf16[] multiply(x.50, y.50)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=max.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=min.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=mul.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllreduceWithLossyConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = u32[8] convert(ar0)
convert1 = u32[8] convert(ar1)
add0 = u32[8] add(convert0, convert1)
convert2 = u32[8] convert(ar2)
add1 = u32[8] add(add0, convert2)
convert3 = u32[8] convert(ar3)
add2 = u32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePattern) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,8] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Constant(), m::Parameter(3)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePatternSameOperand) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = s32[] parameter(2)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p2), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p2), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn0)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(0)),
m::Parameter(1))),
m::Constant(), m::Parameter(2)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSliceDifferentSlices) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,16] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,16] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Add(m::DynamicSlice(),
m::DynamicSlice(m::AllReduce(), m::Constant(), m::Parameter(3))));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b0d1084-8d5e-4768-bdf2-88e35ad4abd6 | cpp | tensorflow/tensorflow | batch_dot_simplification | third_party/xla/xla/service/batch_dot_simplification.cc | third_party/xla/xla/service/batch_dot_simplification_test.cc | #include "xla/service/batch_dot_simplification.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
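// Removes size-1 (degenerate) batch dimensions from a batch dot by reshaping
// both operands, emitting a smaller dot, and reshaping the result back to the
// original shape.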
absl::StatusOr<bool>
BatchDotSimplification::ElideDegenerateBatchDimensionFromBatchDot(
HloInstruction* batch_dot) {
if (Cast<HloDotInstruction>(batch_dot)->sparse_operands()) {
return false;
}
const auto& is_iota = [](absl::Span<const int64_t> dims) {
for (int64_t i = 0; i < dims.size(); ++i) {
if (dims[i] != i) {
return false;
}
}
return true;
};
if (!absl::c_equal(
batch_dot->dot_dimension_numbers().lhs_batch_dimensions(),
batch_dot->dot_dimension_numbers().rhs_batch_dimensions()) ||
!is_iota(batch_dot->dot_dimension_numbers().lhs_batch_dimensions())) {
return false;
}
const DotDimensionNumbers& dim_numbers = batch_dot->dot_dimension_numbers();
HloInstruction *lhs = batch_dot->mutable_operand(0),
*rhs = batch_dot->mutable_operand(1);
const Shape& lhs_shape = lhs->shape();
if (dim_numbers.lhs_contracting_dimensions_size() != 1) {
return false;
}
std::vector<int64_t> degenerate_dims;
for (int64_t batch_dim : dim_numbers.lhs_batch_dimensions()) {
if (lhs_shape.dimensions(batch_dim) == 1) {
degenerate_dims.push_back(batch_dim);
}
}
if (degenerate_dims.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(HloInstruction * new_lhs,
ElideDegenerateDims(lhs, degenerate_dims));
TF_ASSIGN_OR_RETURN(HloInstruction * new_rhs,
ElideDegenerateDims(rhs, degenerate_dims));
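  // Renumber the surviving batch dimensions to 0..k-1 and shift the single
  // contracting dimension down by the number of elided dimensions.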
DotDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.clear_lhs_batch_dimensions();
new_dim_numbers.clear_rhs_batch_dimensions();
for (int64_t i = 0, e = dim_numbers.lhs_batch_dimensions_size() -
degenerate_dims.size();
i < e; i++) {
new_dim_numbers.add_lhs_batch_dimensions(i);
new_dim_numbers.add_rhs_batch_dimensions(i);
}
new_dim_numbers.set_lhs_contracting_dimensions(
0,
new_dim_numbers.lhs_contracting_dimensions(0) - degenerate_dims.size());
new_dim_numbers.set_rhs_contracting_dimensions(
0,
new_dim_numbers.rhs_contracting_dimensions(0) - degenerate_dims.size());
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dot,
MakeDotHlo(new_lhs, new_rhs, new_dim_numbers,
batch_dot->precision_config(),
batch_dot->shape().element_type()));
TF_ASSIGN_OR_RETURN(HloInstruction * new_dot_reshaped,
MakeReshapeHlo(batch_dot->shape(), new_dot));
VLOG(2) << "Replaced " << batch_dot->ToString() << " with "
<< new_dot->ToString();
TF_RETURN_IF_ERROR(
batch_dot->parent()->ReplaceInstruction(batch_dot, new_dot_reshaped));
return true;
}
absl::StatusOr<bool> BatchDotSimplification::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloInstruction*> dot_instrs;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),
[](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDot;
});
}
for (HloInstruction* dot_instr : dot_instrs) {
TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one,
ElideDegenerateBatchDimensionFromBatchDot(dot_instr));
changed |= elided_batch_dim_from_one;
}
return changed;
}
} | #include "xla/service/batch_dot_simplification.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class BatchDotSimplificationTest : public HloTestBase {};
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
0, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3] parameter(1)
ROOT dot = f32[1,9] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideSingleDegenerateBatchDotDim_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,9,3] parameter(0)
b = f32[1,3,7] parameter(1)
ROOT dot = f32[1,9,7] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
1, 0)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorVector) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,3] parameter(1)
ROOT dot = f32[9,1,7,1] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 2)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_VectorMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,3] parameter(0)
b = f32[9,1,7,1,20,3] parameter(1)
ROOT dot = f32[9,1,7,1,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={5}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
2, 3)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDims_MatrixMatrix) {
const std::string hlo_text = R"(
HloModule BatchDot
main {
a = f32[9,1,7,1,19,3] parameter(0)
b = f32[9,1,7,1,3,20] parameter(1)
ROOT dot = f32[9,1,7,1,19,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={5}, rhs_contracting_dims={4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_TRUE(pass.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Reshape(op::Dot(
op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),
3, 2)));
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsNonContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
a = f32[1,101] parameter(0)
b = f32[1,101] parameter(1)
ROOT dot = f32[1,101,101] dot(a,b), lhs_batch_dims={0},
lhs_contracting_dims={},
rhs_batch_dims={0},
rhs_contracting_dims={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
TEST_F(BatchDotSimplificationTest,
ElideMultipleDegenerateBatchDotDimsMultipleContracting) {
const char* hlo_text = R"(
HloModule BatchDot
main {
lhs = f32[1,5,17,10,13] parameter(0)
rhs = f32[1,9,10,13,6,5] parameter(1)
ROOT dot = f32[10,1,17,9,6] dot(lhs,rhs), lhs_batch_dims={3,0},
rhs_batch_dims={2,0},
lhs_contracting_dims={1,4},
rhs_contracting_dims={5,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
BatchDotSimplification pass;
ASSERT_FALSE(pass.Run(m.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51a7fed9-c52a-4155-b34f-5a1bf3110f66 | cpp | tensorflow/tensorflow | hlo_liveness_analysis | third_party/xla/xla/service/hlo_liveness_analysis.cc | third_party/xla/xla/service/hlo_liveness_analysis_test.cc | #include "xla/service/hlo_liveness_analysis.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using Worklist = std::deque<const HloInstruction*>;
using Workset = absl::flat_hash_set<const HloInstruction*>;
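// Classic worklist algorithm: Worklist holds instructions whose liveness
// changed and must be revisited; Workset prevents duplicate enqueues.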
void AddToWorklist(const HloInstruction* instruction, Worklist* worklist,
Workset* workset) {
if (workset->insert(instruction).second) {
worklist->push_back(instruction);
VLOG(3) << "ADD instruction: " << instruction->name();
}
}
using VisitorFunction = absl::FunctionRef<void(const ShapeIndex& /*shape_index*/)>;
void ForEachLiveIndex(const ShapeTree<bool>& index_tree, VisitorFunction func) {
index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) {
if (live) {
func(shape_index);
}
});
}
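// Marks one shape index of 'instruction' as live, lazily creating its
// liveness ShapeTree (initially all-false) and enqueueing the instruction the
// first time the bit flips.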
void MarkLiveAtIndex(const HloInstruction* instruction,
const ShapeIndex& shape_index,
HloLivenessAnalysis::HloIndexMap* live_index_map,
Worklist* worklist, Workset* workset) {
std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), false);
}
bool& alive = *liveness->mutable_element(shape_index);
if (!alive) {
AddToWorklist(instruction, worklist, workset);
alive = true;
VLOG(3) << "MARK instruction: " << instruction->name()
<< " shape_index: " << shape_index;
}
}
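// Marks every shape index of 'instruction' as live, queuing the instruction if
// any index was newly marked.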
void MarkLiveAtAllIndices(const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map,
Worklist* worklist, Workset* workset) {
bool add_to_worklist = false;
std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), true);
add_to_worklist = true;
} else {
for (auto& entry : *liveness) {
if (!entry.second) {
add_to_worklist = true;
entry.second = true;
VLOG(3) << "MARK instruction: " << instruction->name()
<< " shape_index: " << entry.first;
}
}
}
if (add_to_worklist) {
AddToWorklist(instruction, worklist, workset);
}
}
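// Propagates liveness through a kTuple: a live index {i, rest...} on the tuple
// makes index {rest...} of operand i live.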
void PropagateLivenessThroughTuple(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kTuple);
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
const size_t size = shape_index.size();
if (size == 0) {
return;
}
const int64_t operand_index = shape_index[0];
if (operand_index >= instruction->operand_count()) {
return;
}
MarkLiveAtIndex(instruction->operand(operand_index), {}, live_index_map,
worklist, workset);
ShapeIndex operand_shape_index(size - 1);
for (int i = 1; i < size; ++i) {
operand_shape_index[i - 1] = shape_index[i];
}
MarkLiveAtIndex(instruction->operand(operand_index), operand_shape_index,
live_index_map, worklist, workset);
});
}
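// Propagates liveness through a kGetTupleElement: a live index {rest...} makes
// index {tuple_index, rest...} of the operand live.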
void PropagateLivenessThroughGTE(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kGetTupleElement);
MarkLiveAtIndex(instruction->operand(0), {}, live_index_map, worklist,
workset);
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
ShapeIndex operand_shape_index(shape_index);
operand_shape_index.push_front(instruction->tuple_index());
MarkLiveAtIndex(instruction->operand(0), operand_shape_index,
live_index_map, worklist, workset);
});
}
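// Propagates liveness through a kWhile: live indices flow to the loop body
// root and the init operand, and the condition root is always kept live.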
void PropagateLivenessThroughWhile(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset) {
CHECK_EQ(instruction->opcode(), HloOpcode::kWhile);
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(instruction->while_body()->root_instruction(), shape_index,
live_index_map, worklist, workset);
MarkLiveAtIndex(instruction->operand(0), shape_index, live_index_map,
worklist, workset);
});
MarkLiveAtIndex(instruction->while_condition()->root_instruction(), {},
live_index_map, worklist, workset);
}
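// Propagates liveness of a computation parameter back to its call sites; for
// while loops this marks the while result, body root, and init operand live.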
void PropagateLivenessToParameterCallers(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset, CallGraph* call_graph) {
CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
const CallGraphNode& call_graph_node =
call_graph->GetNode(instruction->parent());
if (call_graph_node.context() == CallContext::kControlFlow) {
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
auto* xla_while = callsite.instruction();
const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(xla_while, shape_index, live_index_map, worklist,
workset);
MarkLiveAtIndex(xla_while->while_body()->root_instruction(),
shape_index, live_index_map, worklist, workset);
MarkLiveAtIndex(xla_while->operand(0), shape_index, live_index_map,
worklist, workset);
});
}
}
}
}
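// Keeps control-flow prerequisites live: the condition root of an enclosing
// while, or the predicate and matching branch operand of an enclosing
// conditional.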
void PropagateLivenessThroughControlFlow(
const HloInstruction* instruction,
HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
Workset* workset, CallGraph* call_graph) {
const CallGraphNode& call_graph_node =
call_graph->GetNode(instruction->parent());
if (call_graph_node.context() == CallContext::kControlFlow) {
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
HloInstruction* caller = callsite.instruction();
if (caller->opcode() == HloOpcode::kWhile) {
MarkLiveAtIndex(caller->while_condition()->root_instruction(), {},
live_index_map, worklist, workset);
} else if (caller->opcode() == HloOpcode::kConditional) {
MarkLiveAtIndex(caller->operand(0), {}, live_index_map, worklist,
workset);
MarkLiveAtIndex(caller, {}, live_index_map, worklist, workset);
const HloComputation* callee_comp = instruction->parent();
int64_t operand_index = 1;
for (auto* caller_comp : caller->called_computations()) {
if (callee_comp == caller_comp) {
MarkLiveAtIndex(caller->operand(operand_index), {}, live_index_map,
worklist, workset);
if (instruction->opcode() == HloOpcode::kParameter) {
const ShapeTree<bool>& index_tree =
*live_index_map->at(instruction);
ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
MarkLiveAtIndex(caller->operand(operand_index), shape_index,
live_index_map, worklist, workset);
});
}
break;
}
++operand_index;
}
}
}
}
}
}
HloLivenessAnalysis::HloLivenessAnalysis(const HloModule& module)
: module_(module), call_graph_(CallGraph::Build(&module)) {}
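// Fixed-point worklist algorithm, seeded with the entry root and every
// side-effecting instruction; each visit propagates liveness to operands,
// called computations, and enclosing control flow.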
void HloLivenessAnalysis::RunAnalysis() {
Worklist worklist;
Workset workset;
MarkLiveAtAllIndices(module_.entry_computation()->root_instruction(),
&live_index_map_, &worklist, &workset);
for (auto* computation : module_.computations()) {
for (auto* instruction : computation->instructions()) {
if (instruction->HasSideEffectNoRecurse()) {
MarkLiveAtAllIndices(instruction, &live_index_map_, &worklist,
&workset);
}
}
}
while (!worklist.empty()) {
const HloInstruction* instruction = worklist.front();
worklist.pop_front();
workset.erase(workset.find(instruction));
VLOG(1) << "VISIT instruction: " << instruction->name();
if (instruction->opcode() == HloOpcode::kTuple) {
PropagateLivenessThroughTuple(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
PropagateLivenessThroughGTE(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kWhile) {
PropagateLivenessThroughWhile(instruction, &live_index_map_, &worklist,
&workset);
} else if (instruction->opcode() == HloOpcode::kParameter) {
PropagateLivenessToParameterCallers(instruction, &live_index_map_,
&worklist, &workset,
call_graph_.get());
} else {
for (auto* called_computation : instruction->called_computations()) {
MarkLiveAtAllIndices(called_computation->root_instruction(),
&live_index_map_, &worklist, &workset);
}
for (HloInstruction* operand : instruction->operands()) {
MarkLiveAtAllIndices(operand, &live_index_map_, &worklist, &workset);
}
}
PropagateLivenessThroughControlFlow(instruction, &live_index_map_,
&worklist, &workset, call_graph_.get());
}
}
bool HloLivenessAnalysis::IsLive(const HloInstruction* instruction,
const ShapeIndex& shape_index) const {
auto it = live_index_map_.find(instruction);
return (it != live_index_map_.end()) && it->second->element(shape_index);
}
absl::StatusOr<std::unique_ptr<HloLivenessAnalysis>> HloLivenessAnalysis::Run(
const HloModule& module) {
VLOG(1) << "HloLivenessAnalysis::Run on module " << module.name();
XLA_VLOG_LINES(2, module.ToString());
auto liveness_analysis = absl::WrapUnique(new HloLivenessAnalysis(module));
liveness_analysis->RunAnalysis();
return std::move(liveness_analysis);
}
} | #include "xla/service/hlo_liveness_analysis.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloLivenessAnalysisTest : public HloTestBase {
protected:
HloLivenessAnalysisTest() {}
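  // Runs liveness analysis on 'module'; the result stays owned by the fixture.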
const HloLivenessAnalysis& RunLiveness(HloModule* module) {
liveness_ = HloLivenessAnalysis::Run(*module).value();
return *liveness_;
}
HloInstruction* GetInstruction(HloModule* module, const std::string& name) {
HloInstruction* to_return = nullptr;
for (auto* comp : module->computations()) {
for (auto* inst : comp->instructions()) {
if (inst->name() == name) {
to_return = inst;
break;
}
}
}
return CHECK_NOTNULL(to_return);
}
std::unique_ptr<HloLivenessAnalysis> liveness_;
};
TEST_F(HloLivenessAnalysisTest, AddAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
ROOT add = s32[] add(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, DeadAdd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
add.1 = s32[] add(constant.1, constant.2)
ROOT add.2 = s32[] add(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
}
TEST_F(HloLivenessAnalysisTest, TupleAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
ROOT tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, NestedTupleAtEntryRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(1)
constant.2 = s32[] constant(2)
constant.3 = s32[] constant(3)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
ROOT tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT get-tuple-element.1 = s32[] get-tuple-element(tuple.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfNestedTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
constant.3 = s32[] constant(2)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
ROOT get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {0}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, GteOfGteOfNestedTuple) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleModule
ENTRY SimpleComputation {
constant.1 = s32[] constant(0)
constant.2 = s32[] constant(1)
constant.3 = s32[] constant(2)
tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
ROOT get-tuple-element.2 = s32[] get-tuple-element(get-tuple-element.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {0}));
EXPECT_FALSE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
EXPECT_FALSE(
liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while.0), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.4"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.0"), {}));
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileCondPropagatesLiveness) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
add_S32 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
get-tuple-element.4 = s32[3]{0} get-tuple-element(loop_var.2), index=1
zero = s32[] constant(0)
reduce = s32[] reduce(get-tuple-element.4, zero), dimensions={0}, to_apply=add_S32
add.1 = s32[] add(get-tuple-element.3, reduce)
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(add.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.5 = s32[] get-tuple-element(while.0), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.4"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithLiveTupleElements) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.1), index=2
multiply.1 = s32[] multiply(get-tuple-element.3, get-tuple-element.3)
ROOT tuple.1 = (s32[], s32[], s32[]) tuple(add.1, get-tuple-element.3, multiply.1)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[], s32[]) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
constant.1 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.4, constant.1), direction=LT
}
ENTRY SimpleLoop {
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
constant.4 = s32[] constant(2)
tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.3, constant.4)
while.1 = (s32[], s32[], s32[]) while(tuple.2), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.5 = s32[] get-tuple-element(while.1), index=0
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {2}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {0}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {1}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {2}));
}
TEST_F(HloLivenessAnalysisTest, WhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
WhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
WhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT rtuple = () tuple()
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, NestedWhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
InnerWhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
InnerWhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
OuterWhileCondition {
cond_param.2 = (s32[]) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(cond_param.2), index=0
constant.5 = s32[] constant(5)
ROOT less-than.2 = pred[] compare(get-tuple-element.5, constant.5), direction=LT
}
OuterWhileBody {
body_param.2 = (s32[]) parameter(0)
get-tuple-element.8 = s32[] get-tuple-element(body_param.2), index=0
constant.6 = s32[] constant(0)
tuple.2 = (s32[]) tuple(constant.6)
inner_while = (s32[]) while(tuple.2), condition=InnerWhileCondition,
body=InnerWhileBody
constant.7 = s32[] constant(1)
add.2 = s32[] add(get-tuple-element.8, constant.7)
ROOT rtuple = (s32[]) tuple(add.2)
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=OuterWhileCondition,
body=OuterWhileBody
ROOT rtuple = () tuple()
})")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
TEST_F(HloLivenessAnalysisTest, PropagateLivenessFromConditionalComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule main.67
%region_0.10 (Arg_0.11: (s32[], s32[], f32[1024,3], s32[1])) -> (s32[], s32[], f32[1024,3], s32[1]) {
%Arg_0.11 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
%get-tuple-element.17 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=0, metadata={op_name="while"}
%constant.13 = s32[] constant(1)
%add.25 = s32[] add(s32[] %get-tuple-element.17, s32[] %constant.13), metadata={op_name="while/add_1"}
%get-tuple-element.18 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=1, metadata={op_name="while"}
%add.22 = s32[] add(s32[] %get-tuple-element.18, s32[] %constant.13), metadata={op_name="while/add"}
%get-tuple-element.19 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=2, metadata={op_name="while"}
%constant.16 = f32[] constant(0)
%constant.15 = f32[] constant(1)
%rng.21 = f32[3]{0} rng(f32[] %constant.16, f32[] %constant.15), distribution=rng_uniform, metadata={op_name="while/random_uniform/RandomUniform"}
%reshape.23 = f32[1,3]{1,0} reshape(f32[3]{0} %rng.21), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
%constant.12 = s32[] constant(0)
%dynamic-update-slice.24 = f32[1024,3]{1,0} dynamic-update-slice(f32[1024,3]{1,0} %get-tuple-element.19, f32[1,3]{1,0} %reshape.23, s32[] %get-tuple-element.18, s32[] %constant.12), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
%get-tuple-element.20 = s32[1]{0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=3, metadata={op_name="while"}
ROOT %tuple.26 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %add.25, s32[] %add.22, f32[1024,3]{1,0} %dynamic-update-slice.24, s32[1]{0} %get-tuple-element.20), metadata={op_name="while"}
}
%region_1.27 (Arg_0.28: (s32[], s32[], f32[1024,3], s32[1])) -> pred[] {
%Arg_0.28 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
%get-tuple-element.30 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.28), index=1, metadata={op_name="while"}
%constant.29 = s32[] constant(1024)
ROOT %compare.31 = pred[] compare(s32[] %get-tuple-element.30, s32[] %constant.29), direction=LT, metadata={op_name="while/Less"}
}
%region_2.42 (Arg_0.43: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
%constant.44 = pred[] constant(true)
%Arg_0.43 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
%get-tuple-element.52 = f32[3,32,32,3]{3,2,1,0} get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=0, metadata={op_name="image_sample/write_summary/summary_cond"}
%constant.49 = f32[] constant(255.5)
%broadcast.50 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.49), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
%multiply.53 = f32[3,32,32,3]{3,2,1,0} multiply(f32[3,32,32,3]{3,2,1,0} %get-tuple-element.52, f32[3,32,32,3]{3,2,1,0} %broadcast.50), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
%constant.47 = f32[] constant(0)
%broadcast.48 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.47), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
%maximum.54 = f32[3,32,32,3]{3,2,1,0} maximum(f32[3,32,32,3]{3,2,1,0} %multiply.53, f32[3,32,32,3]{3,2,1,0} %broadcast.48), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
%constant.45 = f32[] constant(255)
%broadcast.46 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.45), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
%minimum.55 = f32[3,32,32,3]{3,2,1,0} minimum(f32[3,32,32,3]{3,2,1,0} %maximum.54, f32[3,32,32,3]{3,2,1,0} %broadcast.46), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
%convert.56 = u8[3,32,32,3]{3,2,1,0} convert(f32[3,32,32,3]{3,2,1,0} %minimum.55), metadata={op_name="image_sample/write_summary/summary_cond/convert_image"}
%get-tuple-element.51 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
%send.57 = (u8[3,32,32,3]{3,2,1,0}, u32[], token[]) send(u8[3,32,32,3]{3,2,1,0} %convert.56, token[] %get-tuple-element.51), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
%send-done.58 = token[] send-done((u8[3,32,32,3]{3,2,1,0}, u32[], token[]) %send.57), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
ROOT %tuple.59 = (pred[], token[]) tuple(pred[] %constant.44, token[] %send-done.58), metadata={op_name="image_sample/write_summary/summary_cond"}
}
%region_3.60 (Arg_0.61: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
%constant.62 = pred[] constant(false)
%Arg_0.61 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
%get-tuple-element.63 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.61), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
ROOT %tuple.64 = (pred[], token[]) tuple(pred[] %constant.62, token[] %get-tuple-element.63), metadata={op_name="image_sample/write_summary/summary_cond"}
}
ENTRY %main.67 (arg_tuple.1: (s32[])) -> () {
%arg_tuple.1 = (s32[]{:T(256)}) parameter(0)
%get-tuple-element.2 = s32[]{:T(256)} get-tuple-element((s32[]{:T(256)}) %arg_tuple.1), index=0
%constant.3 = s32[] constant(0)
%compare.8 = pred[]{:T(256)} compare(s32[]{:T(256)} %get-tuple-element.2, s32[] %constant.3), direction=EQ, metadata={op_name="image_sample/write_summary/Equal"}
%constant.5 = f32[] constant(0)
%broadcast.6 = f32[1024,3]{1,0} broadcast(f32[] %constant.5), dimensions={}, metadata={op_name="tokens_accumulator"}
%constant.4 = s32[1]{0} constant({1024})
%tuple.9 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %constant.3, s32[] %constant.3, f32[1024,3]{1,0} %broadcast.6, s32[1]{0} %constant.4), metadata={op_name="while"}
%while.32 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) while((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %tuple.9), condition=%region_1.27, body=%region_0.10, metadata={op_name="while"}
%get-tuple-element.33 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %while.32), index=2, metadata={op_name="while"}
%transpose.34 = f32[3,1024]{0,1} transpose(f32[1024,3]{1,0} %get-tuple-element.33), dimensions={1,0}, metadata={op_name="transpose.transpose/perm"}
%reshape.35 = f32[3,32,32,1]{3,2,1,0} reshape(f32[3,1024]{0,1} %transpose.34), metadata={op_name="Reshape"}
%broadcast.36 = f32[3,32,32,1]{3,2,1,0} broadcast(f32[3,32,32,1]{3,2,1,0} %reshape.35), dimensions={0,1,2,3}, metadata={op_name="Tile"}
%reshape.37 = f32[3,32,32]{2,1,0} reshape(f32[3,32,32,1]{3,2,1,0} %broadcast.36), metadata={op_name="Tile"}
%broadcast.38 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[3,32,32]{2,1,0} %reshape.37), dimensions={0,1,2}, metadata={op_name="Tile"}
%after-all.7 = token[] after-all(), metadata={op_name="image_sample/write_summary/summary_cond"}
%send.39 = (pred[]{:T(256)}, u32[], token[]) send(pred[]{:T(256)} %compare.8, token[] %after-all.7), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
%send-done.40 = token[] send-done((pred[]{:T(256)}, u32[], token[]) %send.39), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
%tuple.41 = (f32[3,32,32,3]{3,2,1,0}, token[]) tuple(f32[3,32,32,3]{3,2,1,0} %broadcast.38, token[] %send-done.40), metadata={op_name="image_sample/write_summary/summary_cond"}
%conditional.65 = (pred[], token[]) conditional(pred[]{:T(256)} %compare.8, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41), true_computation=%region_2.42, false_computation=%region_3.60, metadata={op_name="image_sample/write_summary/summary_cond"}
ROOT %tuple.66 = () tuple()
}
)")
.value();
const HloLivenessAnalysis& liveness = RunLiveness(module.get());
EXPECT_TRUE(
liveness.IsLive(GetInstruction(module.get(), "conditional.65"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.41"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "get-tuple-element.33"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.32"), {}));
EXPECT_TRUE(liveness.IsLive(
GetInstruction(module.get(), "dynamic-update-slice.24"), {}));
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "send.57"), {}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02bc84dc-502e-49cb-bc34-f1006c412337 | cpp | tensorflow/tensorflow | defuser | third_party/xla/xla/service/defuser.cc | third_party/xla/xla/service/defuser_test.cc | #include "xla/service/defuser.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
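// Replaces every fusion instruction in the module with the inlined body of its
// fusion computation.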
absl::StatusOr<bool> Defuser::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Defusing module " << module->name();
XLA_VLOG_LINES(2, "Before defusion:\n" + module->ToString());
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(
[&](const CallGraphNode& call_graph_node) -> absl::Status {
if (call_graph_node.computation()->IsFusionComputation()) {
TF_RET_CHECK(call_graph_node.caller_callsites().size() == 1);
HloInstruction* fusion_instruction =
call_graph_node.caller_callsites()[0].instruction();
TF_RETURN_IF_ERROR(fusion_instruction->Defuse());
changed = true;
}
return absl::OkStatus();
},
      /*visit_unreachable_nodes=*/true));
XLA_VLOG_LINES(2, "After defusion:\n" + module->ToString());
return changed;
}
} | #include "xla/service/defuser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DefuserTest : public HloTestBase {
protected:
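  // Returns the number of fusion computations in 'm'.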
int FusionCount(const HloModule* m) {
int count = 0;
for (HloComputation* computation : m->computations()) {
if (computation->IsFusionComputation()) {
count++;
}
}
return count;
}
Defuser defuser_;
const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});
};
TEST_F(DefuserTest, NoFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
m->AddEntryComputation(builder.Build());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_FALSE(defuser_.Run(m.get()).value());
}
TEST_F(DefuserTest, TrivialFusionInstructionAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Parameter(), op::Parameter()));
}
TEST_F(DefuserTest, TrivialFusionInstructionNotAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Fusion()));
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Negate(op::Add(op::Parameter(), op::Parameter())));
}
TEST_F(DefuserTest, NonTrivialFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction(
{add2, constant, div, mul, sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, MultipleFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add2, constant, div, mul},
HloInstruction::FusionKind::kLoop);
computation->CreateFusionInstruction({sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, NestedFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
auto outer_fusion = computation->CreateFusionInstruction(
{negate, add}, HloInstruction::FusionKind::kLoop);
HloInstruction* fused_negate = outer_fusion->fused_expression_root();
ASSERT_EQ(fused_negate->opcode(), HloOpcode::kNegate);
outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
{fused_negate}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
46bc99e4-502f-4dcb-8ff3-7a075b7f1f98 | cpp | tensorflow/tensorflow | elemental_ir_emitter | third_party/xla/xla/service/gpu/elemental_ir_emitter.cc | third_party/xla/xla/service/elemental_ir_emitter_test.cc | #include "xla/service/gpu/elemental_ir_emitter.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/ModRef.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_nested.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/math_ops.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
GpuElementalIrEmitter::GpuElementalIrEmitter(
IrEmitterContext& ir_emitter_context, llvm::IRBuilder<>* b)
: ElementalIrEmitter(ir_emitter_context.llvm_module(), b),
ir_emitter_context_(ir_emitter_context) {}
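// Emits a call into the device math library. These routines only accept
// F32/F64, so F16 operands are widened to F32 and the result is narrowed back.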
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitDeviceMathCall(
TargetDeviceFunctionID funcid, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
absl::string_view name) {
bool cast_result_to_fp16 = false;
std::vector<llvm::Value*> converted_operands(operands.begin(),
operands.end());
std::vector<PrimitiveType> converted_input_types(input_types.begin(),
input_types.end());
switch (output_type) {
case F16:
cast_result_to_fp16 = true;
for (int64_t i = 0; i < operands.size(); ++i) {
if (input_types[i] == F16) {
converted_operands[i] =
FPCast(converted_operands[i], b()->getFloatTy());
converted_input_types[i] = F32;
}
}
output_type = F32;
[[fallthrough]];
case F32:
break;
case F64:
break;
default:
return Unimplemented("Bad type for device math call: %s",
PrimitiveType_Name(output_type));
}
const std::string& munged_callee = ObtainDeviceFunctionName(
funcid, output_type,
llvm::Triple(b()->GetInsertBlock()->getModule()->getTargetTriple()));
llvm::Value* result = EmitMathCall(munged_callee, converted_operands,
converted_input_types, output_type, name)
.value();
if (cast_result_to_fp16) {
result = FPCast(result, b()->getHalfTy());
}
return result;
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
absl::string_view name) {
for (PrimitiveType input_type : input_types) {
if (output_type != input_type) {
return Unimplemented("Input type != output type: %s != %s",
PrimitiveType_Name(input_type),
PrimitiveType_Name(output_type));
}
}
return EmitDeviceFunctionCall(callee_name, operands, input_types, output_type,
llvm::AttrBuilder(b()->getContext())
.addMemoryAttr(llvm::MemoryEffects::none())
.addAttribute(llvm::Attribute::NoUnwind),
b(), name);
}
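// Computes the source index of a bitcast, using the explicit source/result
// layouts recorded in the instruction's BitcastBackendConfig when present.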
llvm_ir::IrArray::Index GpuElementalIrEmitter::GetSourceIndexOfBitcast(
const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) {
Shape shape = hlo->shape();
Shape operand_shape = hlo->operand(0)->shape();
auto gpu_config = hlo->backend_config<GpuBackendConfig>();
CHECK_OK(gpu_config);
const BitcastBackendConfig& bitcast_config =
gpu_config.value().bitcast_backend_config();
if (!bitcast_config.result_layout().minor_to_major().empty()) {
*shape.mutable_layout() =
xla::Layout::CreateFromProto(bitcast_config.result_layout());
}
if (!bitcast_config.source_layout().minor_to_major().empty()) {
*operand_shape.mutable_layout() =
xla::Layout::CreateFromProto(bitcast_config.source_layout());
}
return index.SourceIndexOfBitcast(shape, operand_shape, b());
}
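// Lowers floating-point binary ops: min/max use LLVM intrinsics when
// fast-min-max is enabled, remainder and power call device math functions, and
// everything else falls back to the base emitter.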
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitFloatBinaryOp(
const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();
PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();
PrimitiveType output_type = op->shape().element_type();
HloOpcode opcode = op->opcode();
if (ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max() &&
(opcode == HloOpcode::kMaximum || opcode == HloOpcode::kMinimum)) {
return llvm_ir::EmitCallToIntrinsic(
opcode == HloOpcode::kMaximum ? llvm::Intrinsic::maxnum
: llvm::Intrinsic::minnum,
{lhs_value, rhs_value}, {lhs_value->getType()}, b());
}
switch (op->opcode()) {
case HloOpcode::kRemainder: {
return EmitDeviceMathCall(TargetDeviceFunctionID::kFmod,
{lhs_value, rhs_value},
{lhs_input_type, rhs_input_type}, output_type);
}
case HloOpcode::kPower: {
return EmitPowerOp(op, lhs_value, rhs_value);
}
default:
return ElementalIrEmitter::EmitFloatBinaryOp(op, lhs_value, rhs_value);
}
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPowerOp(
const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
CHECK_EQ(op->opcode(), HloOpcode::kPower);
PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();
PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();
PrimitiveType output_type = op->shape().element_type();
return EmitDeviceMathCall(TargetDeviceFunctionID::kPow,
{lhs_value, rhs_value},
{lhs_input_type, rhs_input_type}, output_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kLog, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog1p(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kLog1p, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSin(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kSin, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCos(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kCos, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTan(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kTan, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExp(
    PrimitiveType prim_type, llvm::Value* value, absl::string_view /*name*/) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kExp, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExpm1(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kExpm1, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPow(
PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
absl::string_view name) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs, rhs},
{prim_type, prim_type}, prim_type, name);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSqrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kSqrt, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitRsqrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kRsqrt, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitAtan2(
PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
absl::string_view name) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kAtan2, {lhs, rhs},
{prim_type, prim_type}, prim_type, name);
}
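// Emits tanh: F64 calls the device routine; F16/F32 use a fast approximation,
// clamped to copysign(1, x) once |x| reaches 20 (F16 is evaluated in F32).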
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTanh(
PrimitiveType prim_type, llvm::Value* value) {
if (prim_type == F64) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kTanh, {value},
{prim_type}, prim_type);
}
llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();
llvm::Value* input = FPCast(value, type);
constexpr double kMaxValue = 20.0;
auto max_value = llvm::ConstantFP::get(type, kMaxValue);
llvm::Value* abs_value =
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b());
llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b(), input);
auto one = llvm::ConstantFP::get(type, 1.0);
auto one_with_sign = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign,
{one, input}, {type}, b());
return FPCast(Select(FCmpULT(abs_value, max_value), fast_tanh, one_with_sign),
value->getType(), "tanh");
}
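// Emits erf: F64 calls the device routine, F32 (and F16 via F32) uses an
// inline approximation; other types are unimplemented.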
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitErf(
PrimitiveType prim_type, llvm::Value* value) {
if (prim_type == F64) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kErf, {value},
{prim_type}, prim_type);
}
llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();
if (type == b()->getFloatTy()) {
llvm::Value* x = FPCast(value, type);
auto* result = llvm_ir::EmitErfF32(b(), x);
return FPCast(result, value->getType());
}
return Unimplemented("erf");
}
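// Computes |a+bi| as hypot(a, b) via the device math library.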
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitComplexAbs(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kHypot,
{EmitExtractReal(value), EmitExtractImag(value)},
{prim_type, prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCbrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kCbrt, {value}, {prim_type},
prim_type);
}
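// Lowers a call on scalar operands by emitting 'callee' as a nested
// computation.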
absl::StatusOr<std::vector<llvm::Value*>>
GpuElementalIrEmitter::EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
    absl::string_view, bool /*is_reducer*/) {
return CallNestedComputationWithScalars(b(), ir_emitter_context_, callee,
parameters);
}
}
} | #include "xla/service/elemental_ir_emitter.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::nullopt;
class ElementalIrEmitterExecutionTest : public HloTestBase {
protected:
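  // Runs 'hlo_text' without HLO passes and compares against the reference
  // backend.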
void RunTest(const std::string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), args, nullopt));
}
void RunTypeConversionTest(absl::string_view hlo_text) {
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
    EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
};
class ElementalIrEmitterExecutionTestWithoutFastMinMax
: public ElementalIrEmitterExecutionTest {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
ElementalIrEmitterExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_cpu_enable_fast_min_max(false);
debug_options.set_xla_gpu_enable_fast_min_max(false);
return debug_options;
}
};
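// Typed fixture that re-runs the conversion and comparison tests below for
// each narrow float type in FloatTypes (bfloat16 and the float8 variants).
// TypeName() yields the lowercase HLO type name substituted for ${tname}
// in the HLO templates.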
template <typename T>
class ElementalIrEmitterExecutionTypedTest
: public ElementalIrEmitterExecutionTest {
protected:
const std::string& TypeName() {
return primitive_util::LowercasePrimitiveTypeName(
primitive_util::NativeToPrimitiveType<T>());
}
};
using FloatTypes =
::testing::Types<bfloat16, tsl::float8_e5m2, tsl::float8_e5m2fnuz,
tsl::float8_e4m3, tsl::float8_e4m3fn, tsl::float8_e4m3fnuz,
tsl::float8_e4m3b11fnuz, tsl::float8_e3m4>;
TYPED_TEST_SUITE(ElementalIrEmitterExecutionTypedTest, FloatTypes);
XLA_TEST_F(ElementalIrEmitterExecutionTest, DotFusion) {
const std::string hlo_text = R"(
HloModule FusedDot
fused_computation {
arg0 = s32[1,2,1]{2,1,0} parameter(0)
reshape.lhs = s32[2,1]{1,0} reshape(arg0)
arg1 = s32[1,2,1]{2,1,0} parameter(1)
reshape.rhs = s32[2,1]{1,0} reshape(arg1)
ROOT dot = s32[1,1]{1,0} dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[1,2,1]{2,1,0} parameter(0)
entry_arg1 = s32[1,2,1]{2,1,0} parameter(1)
ROOT fusion = s32[1,1]{1,0} fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
Literal lhs = LiteralUtil::CreateR3<int32_t>({{{1}, {2}}});
Literal rhs = LiteralUtil::CreateR3<int32_t>({{{3}, {4}}});
RunTest(hlo_text, {&lhs, &rhs});
}
XLA_TEST_F(ElementalIrEmitterExecutionTest, ScalarDotFusion) {
const char* hlo_text = R"(
HloModule ScalarDotFusion
fused_computation {
arg0 = s32[2,2]{1,0} parameter(0)
reshape.lhs = s32[4]{0} reshape(arg0)
arg1 = s32[2,2]{1,0} parameter(1)
reshape.rhs = s32[4]{0} reshape(arg1)
ROOT dot = s32[] dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[2,2]{1,0} parameter(0)
entry_arg1 = s32[2,2]{1,0} parameter(1)
ROOT fusion = s32[] fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
Literal lhs = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
Literal rhs = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
RunTest(hlo_text, {&lhs, &rhs});
}
XLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDot) {
const char* hlo_text = R"(
HloModule BatchDot
fused_computation.1 {
param_0 = f64[1,1,8]{2,1,0} parameter(0)
r.1 = f64[2,4]{1,0} reshape(param_0)
param_1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
r.2 = f64[2,4,1]{2,1,0} reshape(param_1)
ROOT dot = f64[2,1]{1,0} dot(r.1, r.2), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
ENTRY resampler_Resampler.49 {
p0 = f64[1,1,8]{2,1,0} parameter(0)
p1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
ROOT f = f64[2,1]{1,0} fusion(p0, p1), kind=kLoop, calls=fused_computation.1
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("layout-assignment");
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{4e-3, 4e-3}));
}
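// The next three tests exercise complex division with inf/nan operands;
// fast-math is configured to honor NaNs and infinities so the emitter's
// special-case handling is observable in the results.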
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithInfiniteNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[8]{0} constant({
(1, 1), (1, inf), (1, inf), (nan, 1),
(inf, inf), (inf, nan), (nan, nan), (1, 2)})
real = f32[8]{0} constant({nan, nan, inf, inf, inf, 1, inf, 3})
imag = f32[8]{0} constant({inf, inf, inf, inf, 1, inf, inf, 4})
complex.2 = c64[8]{0} complex(real, imag)
ROOT divide.1 = c64[8]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithFiniteNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[5]{0} constant({
(1, inf), (inf, 1), (inf, nan), (inf, inf), (nan, inf)})
real = f32[5]{0} constant({1, 1, 1, 1, 1})
imag = f32[5]{0} constant({1, 1, 1, 1, 1})
complex.2 = c64[5]{0} complex(real, imag)
ROOT divide.1 = c64[5]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithZeroNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[9]{0} constant({
(1, 1), (1, nan), (1, inf), (inf, inf), (inf, 1),
(inf, nan), (nan, 1), (nan, inf), (nan, nan)})
real = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
imag = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
complex.2 = c64[9]{0} complex(real, imag)
ROOT divide.1 = c64[9]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatsToFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e4m3>() ||
std::is_same<TypeParam, tsl::float8_e4m3fn>() ||
std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() ||
std::is_same<TypeParam, tsl::float8_e3m4>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
f16_ = f16[] parameter(0)
f32_ = f32[] parameter(1)
f64_ = f64[] parameter(2)
bf16_ = bf16[] parameter(3)
converted_f16 = ${tname}[] convert(f16_)
converted_f32 = ${tname}[] convert(f32_)
converted_f64 = ${tname}[] convert(f64_)
converted_bf16 = ${tname}[] convert(bf16_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_f16, converted_f32, converted_f64, converted_bf16)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertSignedToFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
s8_ = s8[] parameter(0)
s16_ = s16[] parameter(1)
s32_ = s32[] parameter(2)
s64_ = s64[] parameter(3)
converted_s8 = ${tname}[] convert(s8_)
converted_s16 = ${tname}[] convert(s16_)
converted_s32 = ${tname}[] convert(s32_)
converted_s64 = ${tname}[] convert(s64_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_s8, converted_s16, converted_s32, converted_s64)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertUnsignedToFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
u8_ = u8[] parameter(0)
u16_ = u16[] parameter(1)
u32_ = u32[] parameter(2)
u64_ = u64[] parameter(3)
converted_u8 = ${tname}[] convert(u8_)
converted_u16 = ${tname}[] convert(u16_)
converted_u32 = ${tname}[] convert(u32_)
converted_u64 = ${tname}[] convert(u64_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_u8, converted_u16, converted_u32, converted_u64)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToFloats) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_f16 = ${tname}[] parameter(0)
to_f32 = ${tname}[] parameter(1)
to_f64 = ${tname}[] parameter(2)
to_bf16 = ${tname}[] parameter(3)
f16_ = f16[] convert(to_f16)
f32_ = f32[] convert(to_f32)
f64_ = f64[] convert(to_f64)
  bf16_ = bf16[] convert(to_bf16)
ROOT tuple = (f16[], f32[], f64[], bf16[]) tuple(f16_, f32_, f64_, bf16_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToSigned) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_s8 = ${tname}[] parameter(0)
to_s16 = ${tname}[] parameter(1)
to_s32 = ${tname}[] parameter(2)
to_s64 = ${tname}[] parameter(3)
s8_ = s8[] convert(to_s8)
s16_ = s16[] convert(to_s16)
s32_ = s32[] convert(to_s32)
s64_ = s64[] convert(to_s64)
ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToUnsigned) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_u8 = ${tname}[] parameter(0)
to_u16 = ${tname}[] parameter(1)
to_u32 = ${tname}[] parameter(2)
to_u64 = ${tname}[] parameter(3)
u8_ = u8[] convert(to_u8)
u16_ = u16[] convert(to_u16)
u32_ = u32[] convert(to_u32)
u64_ = u64[] convert(to_u64)
ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToComplex) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_c64 = ${tname}[] parameter(0)
to_c128 = ${tname}[] parameter(1)
c64_ = c64[] convert(to_c64)
c128_ = c128[] convert(to_c128)
ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, CompareFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
p0 = ${tname}[4] parameter(0)
p1 = ${tname}[4] parameter(1)
ROOT cmp = pred[4] compare(p0, p1), direction=LT
})",
{{"${tname}", tname}});
Literal lhs = LiteralUtil::CreateR1<TypeParam>(
{TypeParam(1.), TypeParam(2.), TypeParam(3.), TypeParam(4.)});
Literal rhs = LiteralUtil::CreateR1<TypeParam>(
{TypeParam(4.), TypeParam(4.), TypeParam(2.), TypeParam(1.)});
ElementalIrEmitterExecutionTest::RunTest(hlo_text, {&lhs, &rhs});
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, IotaFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e5m2>() ||
std::is_same<TypeParam, tsl::float8_e4m3>() ||
std::is_same<TypeParam, tsl::float8_e4m3fn>() ||
std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() ||
std::is_same<TypeParam, tsl::float8_e3m4>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
ROOT iota_ = ${tname}[4] iota(), iota_dimension=0
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTest(hlo_text, {});
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, BatchDotFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule matmul
ENTRY main {
x = ${tname}[8,16] parameter(0)
y = ${tname}[8,16,32] parameter(1)
ROOT dot = ${tname}[8,32] dot(x, y), lhs_batch_dims={0},
rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)",
{{"${tname}", tname}});
HloModuleConfig config;
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
HloTestBase::ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(
HloTestBase::RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
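// With fast min/max disabled, minimum/maximum must propagate a NaN operand
// rather than blindly picking one side; the following tests cover NaNs on
// either side as well as the plain ordered cases.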
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumHandlesNaNsOnTheLeft) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(nans, neg1s)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
DISABLED_MinimumHandlesNaNsOnTheRight) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(neg1s, nans)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumHandlesNaNsOnTheLeft) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(nans, neg1s)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumHandlesNaNsOnTheRight) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(neg1s, nans)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumReturnsLHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(zeros, ones)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumReturnsRHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(ones, zeros)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumReturnsLHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(ones, zeros)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumReturnsRHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(zeros, ones)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
1e-3}));
}
class ElementalIrEmitterInternalTest : public HloTestBase {};
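// Note: ElementalIrEmitterForTests (not shown in this excerpt) is assumed to
// be a test-only ElementalIrEmitter subclass that exposes the protected dot
// emission path as TestElementalDot.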
XLA_TEST_F(ElementalIrEmitterInternalTest, SparseDotIsUnsupported) {
constexpr absl::string_view kHloText = R"(
HloModule test
ENTRY main {
lhs = f16[5,16] parameter(0)
rhs = f16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloText));
HloInstruction* root = module->entry_computation()->root_instruction();
llvm::LLVMContext llvm_context;
llvm::Module llvm_module("", llvm_context);
llvm::IRBuilder<> builder(llvm_context);
ElementalIrEmitterForTests emitter(&llvm_module, &builder);
llvm_ir::IrArray::Index test_index{builder.getInt64Ty()};
auto result = emitter.TestElementalDot(root, test_index);
EXPECT_FALSE(result.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/elemental_ir_emitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/elemental_ir_emitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
570c48f7-236d-4c3e-8e7d-44cba5bf1ea5 | cpp | tensorflow/tensorflow | buffer_assignment | third_party/xla/xla/service/buffer_assignment.cc | third_party/xla/xla/service/buffer_assignment_test.cc | #include "xla/service/buffer_assignment.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/map_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/buffer_value_containers.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
namespace xla {
namespace {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::StrAppend;
using absl::StrAppendFormat;
using memory_space_assignment::PresetAssignments;
using ::tsl::strings::HumanReadableNumBytes;
absl::flat_hash_map<int64_t, const HloInstruction*> BuildIdToHloInstructionMap(
const HloModule* module) {
absl::flat_hash_map<int64_t, const HloInstruction*> id_to_hlo_instruction;
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
id_to_hlo_instruction[instruction->unique_id()] = instruction;
}
}
return id_to_hlo_instruction;
}
absl::StatusOr<absl::flat_hash_map<int64_t, const HloValue*>>
BuildIdToLogicalBufferMap(
const BufferAssignmentProto& proto,
const absl::flat_hash_map<int64_t, const HloInstruction*>&
id_to_hlo_instruction,
const std::unique_ptr<HloAliasAnalysis>& alias_analysis) {
absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer;
for (const LogicalBufferProto& logical_buffer_proto :
proto.logical_buffers()) {
TF_RET_CHECK(logical_buffer_proto.has_defined_at())
<< "Expected logical buffer to have location information in the proto.";
TF_RET_CHECK(id_to_hlo_instruction.contains(
logical_buffer_proto.defined_at().instruction_id()))
<< "Expected hlo instruction "
<< "with the id '" << logical_buffer_proto.defined_at().instruction_id()
<< "' in the proto to also exist in the "
"HLO module.";
const HloInstruction* hlo_instruction = id_to_hlo_instruction.at(
logical_buffer_proto.defined_at().instruction_id());
std::vector<int64_t> shape_idx_vals;
absl::c_copy(logical_buffer_proto.defined_at().shape_index(),
std::back_inserter(shape_idx_vals));
ShapeIndex proto_shape_index(shape_idx_vals);
auto& logical_buffer = alias_analysis->dataflow_analysis().GetUniqueValueAt(
hlo_instruction, proto_shape_index);
logical_buffer.set_color(logical_buffer_proto.color());
id_to_logical_buffer[logical_buffer_proto.id()] = &logical_buffer;
}
return id_to_logical_buffer;
}
}
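// Walks the call graph from the entry computation and classifies every
// reachable computation as "global" (reached via call/while/conditional or
// async ops, which operate on buffer allocations directly) or "thread-local"
// (reached via ops such as map, reduce, fusion, or sort). A computation that
// would need both classifications is an error, as is a call/while nested
// inside a thread-local context.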
absl::Status GatherComputationsByAllocationType(
const HloModule* module,
std::vector<const HloComputation*>* thread_local_computations,
std::vector<const HloComputation*>* global_computations) {
std::deque<std::pair<const HloComputation*, bool>> worklist;
worklist.push_back(std::make_pair(module->entry_computation(),
false));
flat_hash_set<const HloComputation*> thread_local_set;
flat_hash_set<const HloComputation*> global_set;
while (!worklist.empty()) {
auto worklist_front = worklist.front();
worklist.pop_front();
const HloComputation* computation = worklist_front.first;
bool is_thread_local = worklist_front.second;
bool in_thread_local_set = thread_local_set.contains(computation);
bool in_global_set = global_set.contains(computation);
if ((is_thread_local && in_thread_local_set) ||
(!is_thread_local && in_global_set)) {
continue;
}
if ((is_thread_local && in_global_set) ||
(!is_thread_local && in_thread_local_set)) {
return InvalidArgument(
"computation %s has conflicting allocation requirements (global "
"and thread-local)",
computation->name());
}
if (is_thread_local) {
thread_local_set.insert(computation);
} else {
global_set.insert(computation);
}
for (auto* instruction : computation->instructions()) {
for (HloComputation* subcomputation :
instruction->called_computations()) {
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kWhile:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
if (is_thread_local) {
return InvalidArgument(
"computation %s cannot contain call/while op because it "
"requires thread-local buffer allocations",
computation->name());
}
worklist.push_back(std::make_pair(subcomputation,
false));
break;
case HloOpcode::kCustomCall:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kFusion:
worklist.push_back(std::make_pair(subcomputation,
true));
break;
default:
return Internal("Unexpected calling opcode: %s",
HloOpcodeString(instruction->opcode()));
}
}
}
}
for (auto* computation : module->MakeComputationPostOrder()) {
if (thread_local_set.contains(computation)) {
thread_local_computations->push_back(computation);
} else if (global_set.contains(computation)) {
global_computations->push_back(computation);
}
}
return absl::OkStatus();
}
std::string BufferAllocation::Slice::ToString() const {
return absl::StrCat("{index:", allocation_ == nullptr ? -1 : index(),
", offset:", offset_, ", size:", size_, "}");
}
BufferAllocation::Slice BufferAllocation::GetSlice(
const HloValue& buffer) const {
const OffsetSize os = FindOrDie(assigned_buffers_, &buffer);
return Slice(this, os.offset, os.size);
}
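// Records that `buffer` occupies [offset, offset + size) within this
// allocation. CHECK-fails on double assignment, out-of-range offsets, and
// color mismatches (unless this is a preallocated temp allocation with a
// non-default color), then stamps the buffer's color into the layout
// memory_space at every position where the buffer appears.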
void BufferAllocation::AddAssignment(const HloValue& buffer, int64_t offset,
int64_t size) {
VLOG(4) << "Adding the following buffer to allocation #" << index()
<< absl::StrFormat(" (size=%d, offset=%d) %s", size, offset,
buffer.ToShortString());
CHECK(!assigned_buffers_.contains(&buffer))
<< "LogicalBuffer " << buffer << " already assigned to allocation "
<< index_;
CHECK_LE(offset, size_) << "LogicalBuffer " << buffer
<< " offset out of range";
CHECK_LE(offset + size, size_)
<< "LogicalBuffer " << buffer
<< " size out of range at offset: " << offset << " with size: " << size;
if (!(IsPreallocatedTempBuffer() && color() != 0)) {
CHECK_EQ(buffer.color(), color())
<< "Buffer color " << buffer.color() << " for buffer " << buffer
<< " does not match allocation color " << color() << ".";
}
OffsetSize offset_size;
offset_size.offset = offset;
offset_size.size = size;
assigned_buffers_.emplace(&buffer, offset_size);
for (HloPosition position : buffer.positions()) {
Shape* shape = ShapeUtil::GetMutableSubshape(
position.instruction->mutable_shape(), position.index);
if (shape->has_layout()) {
shape->mutable_layout()->set_memory_space(buffer.color());
}
}
}
BufferAllocationProto BufferAllocation::ToProto() const {
BufferAllocationProto proto;
proto.set_index(index_);
proto.set_size(size_);
proto.set_is_thread_local(is_thread_local_);
proto.set_is_tuple(is_tuple_);
proto.set_color(color_);
if (is_entry_computation_parameter_) {
proto.set_is_entry_computation_parameter(true);
for (int64_t idx : param_shape_index()) {
proto.add_parameter_shape_index(idx);
}
proto.set_parameter_number(parameter_number_);
}
proto.set_is_constant(is_constant_);
proto.set_maybe_live_out(maybe_live_out_);
for (const auto& buffer_offset_size : assigned_buffers_) {
BufferAllocationProto::Assigned* proto_assigned = proto.add_assigned();
proto_assigned->set_logical_buffer_id(buffer_offset_size.first->id());
proto_assigned->set_offset(buffer_offset_size.second.offset);
proto_assigned->set_size(buffer_offset_size.second.size);
}
absl::c_sort(*proto.mutable_assigned(),
[](const BufferAllocationProto::Assigned& assign1,
const BufferAllocationProto::Assigned& assign2) {
return assign1.logical_buffer_id() <
assign2.logical_buffer_id();
});
return proto;
}
static bool CompareHloValuesById(const HloValue* a, const HloValue* b) {
return a->id() < b->id();
}
static const HloInstruction* GetEntryParameterInstruction(
const BufferAllocation& alloc) {
for (const auto& p : alloc.assigned_buffers()) {
const HloValue* value = p.first;
const HloInstruction* instr = value->instruction();
if (instr->opcode() == HloOpcode::kParameter &&
instr->parent() == instr->GetModule()->entry_computation()) {
return instr;
}
}
return nullptr;
}
static const HloInstruction* GetOutputInstruction(
const BufferAllocation& alloc) {
for (const auto& p : alloc.assigned_buffers()) {
const HloValue* value = p.first;
for (const HloPosition& position : value->positions()) {
const HloInstruction* instr = position.instruction;
if (position.index.empty() &&
instr->parent()->root_instruction() == instr &&
instr->parent()->IsEntryComputation()) {
return instr;
}
}
}
return nullptr;
}
std::string BufferAllocation::ToShortString() const {
std::string output;
StrAppendFormat(&output, "allocation %d: size %d", index_, size());
if (color() != 0) {
StrAppend(&output, ", color ", color());
}
if (is_entry_computation_parameter()) {
const HloInstruction* param = GetEntryParameterInstruction(*this);
StrAppend(&output, ", parameter ", parameter_number(), ", shape |",
param ? param->shape().ToString(false)
: "<unknown shape>",
"| at ShapeIndex ", param_shape_index().ToString());
}
if (const HloInstruction* instr = GetOutputInstruction(*this)) {
StrAppend(&output, ", output shape is |",
instr->shape().ToString(false), "|");
}
if (is_constant()) {
StrAppend(&output, ", constant");
}
if (is_thread_local()) {
StrAppend(&output, ", thread-local");
}
if (maybe_live_out()) {
StrAppend(&output, ", maybe-live-out");
}
if (IsPreallocatedTempBuffer()) {
StrAppend(&output, ", preallocated-temp");
}
StrAppend(&output, ":\n");
return output;
}
std::string BufferAllocation::ToString() const {
std::string output = ToShortString();
std::vector<const HloValue*> sorted_buffers;
for (const auto& buffer_offset_size : assigned_buffers_) {
sorted_buffers.push_back(buffer_offset_size.first);
}
absl::c_sort(sorted_buffers, &CompareHloValuesById);
for (const HloValue* buffer : sorted_buffers) {
const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer);
StrAppend(&output,
absl::StrFormat(
" value: %s (size=%d,offset=%d): %s\n",
buffer->ToShortString(), offset_size.size, offset_size.offset,
ShapeUtil::HumanStringWithLayout(buffer->shape())));
}
return output;
}
std::ostream& operator<<(std::ostream& out, const BufferAllocation& buffer) {
out << buffer.ToString();
return out;
}
std::ostream& operator<<(std::ostream& out, const BufferAllocation::Slice& s) {
out << s.ToString();
return out;
}
bool BufferAssignment::HasAllocation(const HloValue& value) const {
return allocation_index_for_value_.contains(&value);
}
bool BufferAssignment::HasAllocation(HloValue::Id value_id) const {
return HasAllocation(dataflow_analysis().GetValue(value_id));
}
bool BufferAssignment::HasAllocation(const HloBuffer& buffer) const {
return allocation_index_for_value_.contains(buffer.values()[0]);
}
const BufferAllocation& BufferAssignment::GetAssignedAllocation(
const HloValue& value) const {
CHECK(HasAllocation(value));
return GetAllocation(allocation_index_for_value_.at(&value));
}
const BufferAllocation& BufferAssignment::GetAssignedAllocation(
const HloBuffer& hlo_buffer) const {
return GetAssignedAllocation(*hlo_buffer.values()[0]);
}
BufferAllocation* BufferAssignment::GetMutableAssignedAllocation(
const HloBuffer& buffer) {
return const_cast<BufferAllocation*>(&GetAssignedAllocation(buffer));
}
std::set<BufferAllocation::Slice> BufferAssignment::GetAllSlices(
const HloInstruction* instruction, const ShapeIndex& index) const {
std::set<BufferAllocation::Slice> result;
for (const HloValue* value :
dataflow_analysis().GetValueSet(instruction, index).values()) {
if (HasAllocation(*value)) {
result.insert(GetAssignedAllocation(*value).GetSlice(*value));
}
}
return result;
}
const BufferAllocation& BufferAssignment::GetAllocation(
BufferAllocation::Index index) const {
CHECK_GE(index, 0);
CHECK_LT(index, allocations_.size());
return allocations_[index];
}
const BufferAllocation* BufferAssignment::GetInstructionAllocation(
const HloInstruction* hlo, const ShapeIndex& shape_index) const {
const HloValue* value =
dataflow_analysis().GetValueSet(hlo, shape_index).values()[0];
if (!HasAllocation(*value)) {
return nullptr;
}
const BufferAllocation& instruction_allocation =
GetAssignedAllocation(*value);
return &instruction_allocation;
}
BufferAllocation* BufferAssignment::GetMutableAllocation(
BufferAllocation::Index index) {
return const_cast<BufferAllocation*>(&GetAllocation(index));
}
bool BufferAssignment::HasAllocationAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return absl::c_any_of(
dataflow_analysis().GetValueSet(instruction, index).values(),
IsKeyIn(allocation_index_for_value_));
}
bool BufferAssignment::HasTopLevelAllocation(
const HloInstruction* instruction) const {
return HasAllocationAt(instruction, {});
}
absl::StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
const HloInstruction* instruction, const ShapeIndex& index) const {
VLOG(3) << "Trying to find unique slice for " << instruction->name() << " ["
<< index << "]";
BufferAllocation::Slice result;
for (const HloValue* value :
dataflow_analysis().GetValueSet(instruction, index).values()) {
VLOG(3) << "Examining value " << *value;
if (HasAllocation(*value)) {
VLOG(3) << "Has allocation";
const BufferAllocation::Slice slice =
GetAssignedAllocation(*value).GetSlice(*value);
if (result.allocation() == nullptr) {
result = slice;
} else if (result != slice) {
return FailedPrecondition(
"BufferAllocation::Slice for instruction %s at index %s cannot "
"be determined at compile-time.",
instruction->name(), index.ToString());
}
} else {
VLOG(3) << "No allocation";
}
}
if (result.allocation() == nullptr) {
return FailedPrecondition(
"BufferAllocation::Slice not assigned for instruction %s at index %s",
instruction->name(), index.ToString());
}
return result;
}
absl::StatusOr<BufferAllocation::Slice>
BufferAssignment::GetUniqueTopLevelSlice(
const HloInstruction* instruction) const {
return GetUniqueSlice(instruction, {});
}
bool BufferAssignment::SharesSliceAtIndex(
const HloInstruction* hlo_a, const ShapeIndex& shape_index_a,
const HloInstruction* hlo_b, const ShapeIndex& shape_index_b) const {
return GetUniqueSlice(hlo_a, shape_index_a).value() ==
GetUniqueSlice(hlo_b, shape_index_b).value();
}
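// Returns true only when every subshape of both instructions has at least
// one assigned slice and the resulting slice sets are disjoint; any
// unassigned subshape yields a conservative false.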
bool BufferAssignment::HaveDisjointSlices(const HloInstruction* hlo_a,
const HloInstruction* hlo_b) const {
using SliceSet = flat_hash_set<BufferAllocation::Slice>;
auto collect_slices = [&](const HloInstruction* instr) -> SliceSet {
SliceSet slices;
absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(
instr->shape(),
        [&](const Shape&,
const ShapeIndex& index) -> absl::Status {
auto shape_slices = GetAllSlices(instr, index);
if (shape_slices.empty()) {
return InvalidArgument("No slices assigned to part of instr.");
}
slices.insert(shape_slices.begin(), shape_slices.end());
return absl::OkStatus();
});
if (!status.ok()) {
return {};
}
return slices;
};
SliceSet slices_a = collect_slices(hlo_a);
SliceSet slices_b = collect_slices(hlo_b);
return !slices_a.empty() && !slices_b.empty() &&
absl::c_none_of(slices_a, [&](const BufferAllocation::Slice& slice) {
return slices_b.contains(slice);
});
}
absl::StatusOr<BufferAllocation::Slice>
BufferAssignment::GetUniqueTopLevelOutputSlice() const {
return GetUniqueTopLevelSlice(
module_->entry_computation()->root_instruction());
}
BufferAllocation* BufferAssignment::NewEmptyAllocation(
int64_t size, LogicalBuffer::Color color) {
BufferAllocation::Index index = allocations_.size();
allocations_.emplace_back(index, size, color);
BufferAllocation* allocation = &allocations_.back();
return allocation;
}
BufferAllocation* BufferAssignment::NewAllocation(const HloBuffer& buffer,
int64_t size) {
BufferAllocation* allocation = NewEmptyAllocation(size, buffer.color());
AddAssignment(allocation, buffer, 0, size);
allocation->peak_buffers_.push_back(buffer.values()[0]);
return allocation;
}
void BufferAssignment::AddAssignment(BufferAllocation* allocation,
const HloBuffer& buffer, int64_t offset,
int64_t size) {
CHECK(allocation->is_reusable() || allocation->assigned_buffers().empty())
<< "Non-reusable allocation already assigned a buffer: "
<< allocation->ToString();
for (const HloValue* buffer_value : buffer.values()) {
CHECK(!allocation_index_for_value_.contains(buffer_value))
<< "BufferValue " << buffer_value << " already has an allocation.";
allocation->AddAssignment(*buffer_value, offset, size);
allocation_index_for_value_[buffer_value] = allocation->index();
}
if (alias_analysis().BufferLivesOut(buffer)) {
VLOG(3) << "HloBuffer lives out: " << buffer.ToString();
VLOG(3) << "Set maybe live out: " << allocation->ToString();
allocation->set_maybe_live_out(true);
}
}
void BufferAssignment::AddAssignment(BufferAllocation* allocation,
const HloValue& value, int64_t offset,
int64_t size) {
allocation->AddAssignment(value, offset, size);
allocation_index_for_value_[&value] = allocation->index();
  if (alias_analysis().ValueLivesOut(value)) {
    VLOG(3) << "HloValue lives out: " << value.ToString();
VLOG(3) << "Set maybe live out: " << allocation->ToString();
allocation->set_maybe_live_out(true);
}
}
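// Merges all preallocated temp allocations into one combined allocation per
// buffer color, placing each absorbed allocation at an alignment-rounded
// base offset (base 0 for private-stack colors, whose frames overlap). A new
// combined heap is started whenever the size constraint per heap would be
// exceeded. The allocation indices and the value-to-allocation map are then
// rebuilt.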
void BufferAssignment::CombineTempAllocations(
const absl::flat_hash_set<BufferValue::Color>& private_stack_colors,
std::optional<BufferValue::Color> temp_buffer_color) {
VLOG(1) << "CombineTempAllocations()";
std::deque<BufferAllocation> combined_allocations;
flat_hash_map<BufferValue::Color, BufferAllocation*> combined_allocation_map;
const auto first_temp_it =
std::partition(allocations_.begin(), allocations_.end(),
[](const BufferAllocation& allocation) {
return !allocation.IsPreallocatedTempBuffer();
});
if (first_temp_it != allocations_.end()) {
for (auto it = first_temp_it; it != allocations_.end(); ++it) {
BufferAllocation& temp_allocation = *it;
BufferValue::Color color = temp_allocation.color();
auto combined_it = combined_allocation_map.find(color);
if (combined_it == combined_allocation_map.end()) {
VLOG(1) << "Combined temp allocation for color " << color
<< " is: " << temp_allocation;
combined_allocations.emplace_back(temp_allocation);
combined_allocation_map.emplace(color, &combined_allocations.back());
continue;
}
if (combined_it->second->size() + it->size() >=
multiheap_size_constraint_per_heap_) {
VLOG(1) << "Due to size constraint, reset temp allocation for color "
<< color << " to: " << temp_allocation;
combined_allocations.emplace_back(temp_allocation);
combined_allocation_map.emplace(color, &combined_allocations.back());
continue;
}
BufferAllocation* combined_allocation = combined_it->second;
VLOG(1) << "Combined allocation absorbing temp allocation: "
<< temp_allocation;
int64_t alignment = color_alignment_(color);
int64_t base;
bool is_private_stack = private_stack_colors.contains(color);
if (is_private_stack) {
base = 0;
        // Private-stack frames overlap, so the combined size is the max
        // (not the sum) of the absorbed allocations.
        combined_allocation->set_size(
            std::max(combined_allocation->size(), temp_allocation.size()));
} else {
base = RoundUpTo(combined_allocation->size(), alignment);
combined_allocation->set_size(base + temp_allocation.size());
}
for (const auto& buffer_offset_size : temp_allocation.assigned_buffers_) {
const HloValue* value = buffer_offset_size.first;
const int64_t offset = buffer_offset_size.second.offset;
const int64_t size = buffer_offset_size.second.size;
combined_allocation->AddAssignment(*value, base + offset, size);
}
if (!temp_allocation.HeapTraces().empty()) {
CHECK_EQ(temp_allocation.HeapTraces().size(), 1);
combined_allocation->AddHeapTrace(temp_allocation.HeapTraces().front());
}
if (is_private_stack) {
if (temp_allocation.size() == combined_allocation->size()) {
combined_allocation->peak_buffers_ = temp_allocation.peak_buffers_;
}
} else {
combined_allocation->peak_buffers_.insert(
combined_allocation->peak_buffers_.end(),
temp_allocation.peak_buffers_.begin(),
temp_allocation.peak_buffers_.end());
}
if (temp_buffer_color.has_value()) {
if (combined_allocation->color() == 0) {
combined_allocation->set_color(temp_buffer_color.value());
}
}
}
allocations_.erase(first_temp_it, allocations_.end());
for (BufferAllocation& combined : combined_allocations) {
temp_allocation_total_size_ += combined.size();
allocations_.push_back(std::move(combined));
}
}
  allocation_index_for_value_.clear();
for (size_t index = 0; index < allocations_.size(); ++index) {
BufferAllocation* allocation = &allocations_[index];
allocation->set_index(index);
std::vector<const HloValue*> sorted_values;
sorted_values.reserve(allocation->assigned_buffers_.size());
for (const auto& buffer_offset_size : allocation->assigned_buffers_) {
const HloValue* value = buffer_offset_size.first;
      sorted_values.push_back(value);
}
absl::c_sort(sorted_values, &CompareHloValuesById);
for (const HloValue* value : sorted_values) {
allocation_index_for_value_[value] = index;
}
}
}
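// Accumulates allocation counts and byte totals per category (parameter,
// constant, maybe-live-out, preallocated temp). If every non-fusion
// computation has a sequential order, also runs the heap simulator to obtain
// a lower bound on required memory and reports the difference as total
// fragmentation.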
absl::Status BufferAssignment::ComputeSummaryStats() {
for (auto& allocation : Allocations()) {
if (allocation.is_entry_computation_parameter()) {
stats_.parameter_allocation_count++;
stats_.parameter_allocation_bytes += allocation.size();
}
if (allocation.is_constant()) {
stats_.constant_allocation_count++;
stats_.constant_allocation_bytes += allocation.size();
}
if (allocation.maybe_live_out()) {
stats_.maybe_live_out_allocation_count++;
stats_.maybe_live_out_allocation_bytes += allocation.size();
}
if (allocation.IsPreallocatedTempBuffer()) {
stats_.preallocated_temp_allocation_count++;
stats_.preallocated_temp_allocation_bytes += allocation.size();
}
stats_.total_allocation_count++;
stats_.total_allocation_bytes += allocation.size();
}
HloSchedule schedule(module_);
bool schedule_complete = true;
for (const auto& computation : module_->computations()) {
if (!computation->IsFusionComputation()) {
const HloInstructionSequence* sequence =
hlo_ordering().SequentialOrder(*computation);
if (sequence == nullptr) {
schedule_complete = false;
} else {
schedule.set_sequence(computation, *sequence);
}
}
}
if (schedule_complete) {
TF_RETURN_IF_ERROR(schedule.Verify());
TF_ASSIGN_OR_RETURN(
const int64_t min_size,
HeapSimulator::MinimumMemoryForModule(schedule, buffer_size_));
stats_.total_fragmentation_bytes = stats_.total_allocation_bytes - min_size;
}
return absl::OkStatus();
}
std::string BufferAssignment::Stats::ToString() const {
std::string s;
StrAppendFormat(&s, "BufferAssignment stats:\n");
StrAppendFormat(&s, " parameter allocation: %10s\n",
HumanReadableNumBytes(parameter_allocation_bytes));
StrAppendFormat(&s, " constant allocation: %10s\n",
HumanReadableNumBytes(constant_allocation_bytes));
StrAppendFormat(&s, " maybe_live_out allocation: %10s\n",
HumanReadableNumBytes(maybe_live_out_allocation_bytes));
StrAppendFormat(&s, " preallocated temp allocation: %10s\n",
HumanReadableNumBytes(preallocated_temp_allocation_bytes));
if (preallocated_temp_fragmentation_bytes >= 0) {
const double percent = 100. * preallocated_temp_fragmentation_bytes /
preallocated_temp_allocation_bytes;
StrAppendFormat(
&s, " preallocated temp fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);
}
StrAppendFormat(&s, " total allocation: %10s\n",
HumanReadableNumBytes(total_allocation_bytes));
if (total_fragmentation_bytes >= 0) {
const double percent =
100. * total_fragmentation_bytes / total_allocation_bytes;
StrAppendFormat(&s, " total fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(total_fragmentation_bytes), percent);
}
return s;
}
std::string BufferAssignment::ToString() const {
std::string output;
absl::StrAppend(&output, "BufferAssignment:\n");
std::vector<const HloValue*> used_values;
int64_t total_size = 0;
for (auto& allocation : allocations_) {
total_size += allocation.size();
absl::StrAppend(&output, allocation.ToString());
for (const auto& p : allocation.assigned_buffers()) {
used_values.push_back(p.first);
}
}
absl::StrAppend(&output, "\nTotal bytes used: ", total_size, " (",
HumanReadableNumBytes(total_size), ")\n");
absl::StrAppend(&output, "\nUsed values:\n");
absl::c_sort(used_values, &CompareHloValuesById);
for (const HloValue* value : used_values) {
absl::StrAppend(&output, value->ToString());
}
return output;
}
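// Returns up to k of the largest peak-memory logical buffers across all
// allocations, in descending size order. The size-keyed btree multimap acts
// as a bounded min-heap: once it holds k entries, a new buffer displaces the
// current smallest only if it is larger.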
std::vector<std::pair<int64_t, const HloValue*>> TopKPeakBuffers(
    uint64_t k, const std::vector<BufferAllocation>& allocations) {
absl::btree_multimap<int64_t, const HloValue*> topk;
for (const BufferAllocation& allocation : allocations) {
for (const HloValue* value : allocation.PeakMemoryLogicalBuffers()) {
int64_t size = allocation.assigned_buffers().at(value).size;
if (topk.size() < k) {
topk.insert({size, value});
} else {
auto it = topk.begin();
if (size > it->first) {
topk.erase(it);
topk.insert({size, value});
}
}
}
}
std::vector<std::pair<int64_t, const HloValue*>> topk_descending;
topk_descending.reserve(topk.size());
absl::c_reverse_copy(topk, std::back_inserter(topk_descending));
return topk_descending;
}
std::string BufferAssignment::ToVerboseString(
size_t max_buffers_to_show) const {
std::string output =
absl::StrCat("BufferAssignment OOM Debugging.\n", stats_.ToString());
std::vector<std::pair<int64_t, const HloValue*>> peak_buffers =
TopKPeakBuffers(max_buffers_to_show, allocations_);
std::vector<std::string> buf_strs;
for (size_t i = 0; i < std::min(max_buffers_to_show, peak_buffers.size());
++i) {
const HloValue* value = peak_buffers[i].second;
const HloInstruction* instr = value->instruction();
int64_t size = peak_buffers[i].first;
buf_strs.push_back(absl::StrCat("\n\tBuffer ", i + 1, ":\n\t\tSize: ",
xla::HumanReadableNumBytes(size)));
if (!instr->metadata().op_name().empty()) {
buf_strs.push_back(absl::StrCat(
"\n\t\tOperator: ", xla::OpMetadataToString(instr->metadata())));
}
if (instr->opcode() == HloOpcode::kParameter &&
(instr->parent() == instr->GetModule()->entry_computation())) {
buf_strs.push_back(absl::StrCat(
"\n\t\tEntry Parameter Subshape: ",
ShapeUtil::GetSubshape(instr->shape(), value->index()).ToString()));
} else {
buf_strs.push_back(
absl::StrCat("\n\t\tXLA Label: ", HloOpcodeString(instr->opcode()),
"\n\t\tShape: ", value->shape().ToString()));
}
buf_strs.push_back("\n\t\t==========================\n");
}
absl::StrAppend(&output, "Peak buffers:", absl::StrJoin(buf_strs, ""));
return output;
}
std::string BufferAssignment::BufferInfoString() const {
std::string binfo;
absl::StrAppend(&binfo,
"buffer_id,buffer_name,offset,size,"
"definition_time,end_time,num_uses,use_times,use_names\n");
const HloLiveRange& live_ranges = hlo_live_range();
const auto& instruction_schedule = live_ranges.instruction_schedule();
const auto& buffer_live_ranges = live_ranges.buffer_live_ranges();
std::vector<std::pair<const HloValue*, BufferAllocation::OffsetSize>> buffers;
for (const BufferAllocation& allocation : allocations_) {
absl::c_copy(allocation.assigned_buffers(), std::back_inserter(buffers));
}
absl::c_sort(
buffers,
[](const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b1,
const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b2) {
return b1.first->id() < b2.first->id();
});
for (const auto& buffer_pair : buffers) {
const HloValue& buffer = *buffer_pair.first;
const BufferAllocation::OffsetSize& offset_size = buffer_pair.second;
if (!buffer_live_ranges.contains(&buffer)) {
continue;
}
std::vector<std::pair<int64_t, std::string>> uses;
uses.reserve(buffer.GetUses().size());
for (const HloUse& use : buffer.GetUses()) {
uses.emplace_back(instruction_schedule.at(use.instruction),
use.ToString());
}
absl::c_sort(uses);
std::vector<int64_t> use_positions;
std::vector<std::string> use_names;
use_positions.reserve(uses.size());
use_names.reserve(uses.size());
for (const auto& use : uses) {
use_positions.push_back(use.first);
use_names.push_back(use.second);
}
const int64_t definition_time =
instruction_schedule.at(buffer.defining_position().instruction);
const int64_t end_t = buffer_live_ranges.at(&buffer).end;
absl::StrAppend(&binfo, buffer.id(), ",");
absl::StrAppend(&binfo, "\"", buffer.ToShortString(), "\",");
absl::StrAppend(&binfo, offset_size.offset, ",");
absl::StrAppend(&binfo, offset_size.size, ",");
absl::StrAppend(&binfo, definition_time, ",");
absl::StrAppend(&binfo, end_t, ",");
absl::StrAppend(&binfo, use_positions.size(), ",");
absl::StrAppend(&binfo, "\"", absl::StrJoin(use_positions, ";"), "\",");
absl::StrAppend(&binfo, "\"", absl::StrJoin(use_names, ";"), "\"");
absl::StrAppend(&binfo, "\n");
}
return binfo;
}
BufferAssignmentProto BufferAssignment::ToProto() const {
BufferAssignmentProto proto;
const HloDataflowAnalysis& dataflow = this->dataflow_analysis();
for (BufferValue::Id id = 0; id < dataflow.values().size(); id++) {
auto& value = dataflow.values().at(id);
if (HasAllocation(*value)) {
LogicalBufferProto proto_buffer = value->ToProto(buffer_size_);
proto.add_logical_buffers()->Swap(&proto_buffer);
for (const HloValue* alias :
alias_analysis().GetBufferContainingValue(*value).values()) {
if (alias->instruction() == value->instruction() &&
alias->index() == value->index()) {
continue;
}
BufferAssignmentProto::BufferAlias* proto_alias =
proto.add_buffer_aliases();
LogicalBufferProto::Location proto_alias_location =
BufferValue::ToLocationProto(*alias->instruction(), alias->index());
proto_alias->set_source_buffer_id(value->id());
proto_alias->mutable_location()->Swap(&proto_alias_location);
}
}
}
for (const BufferAllocation& allocation : Allocations()) {
BufferAllocationProto proto_allocation = allocation.ToProto();
proto.add_buffer_allocations()->Swap(&proto_allocation);
for (const HeapSimulatorTrace& heap_trace : allocation.HeapTraces()) {
*proto.add_heap_simulator_traces() = heap_trace;
}
}
return proto;
}
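// Reconstructs a BufferAssignment from its proto form: alias analysis is
// re-run on `module`, logical buffers are matched back to HloValues via
// instruction ids and shape indices, and each allocation plus its assigned
// (buffer, offset, size) triples is replayed. Consistency with the proto
// (allocation order, maybe_live_out, buffer counts) is verified as it goes.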
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssignment::FromProto(
const BufferAssignmentProto& proto, const HloModule* module,
BufferValue::SizeFunction buffer_size,
HloDataflowAnalysis::CanShareBuffer can_share_buffer) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer));
auto id_to_hlo_instruction = BuildIdToHloInstructionMap(module);
absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer;
TF_ASSIGN_OR_RETURN(
id_to_logical_buffer,
BuildIdToLogicalBufferMap(proto, id_to_hlo_instruction, alias_analysis));
std::unique_ptr<BufferAssignment> buffer_assignment =
absl::WrapUnique(new BufferAssignment(
module, nullptr, std::move(buffer_size),
nullptr, std::move(alias_analysis),
nullptr));
for (const auto& alloc_proto : proto.buffer_allocations()) {
BufferAllocation* allocation = buffer_assignment->NewEmptyAllocation(
alloc_proto.size(), alloc_proto.color());
CHECK(allocation->index() == alloc_proto.index())
<< "Expected allocations in BufferAssignment proto to be sorted by "
"index.";
allocation->set_is_thread_local(alloc_proto.is_thread_local());
allocation->set_is_tuple(alloc_proto.is_tuple());
allocation->set_constant(alloc_proto.is_constant());
if (alloc_proto.is_entry_computation_parameter()) {
std::vector<int64_t> shape_idx_vals;
absl::c_copy(alloc_proto.parameter_shape_index(),
std::back_inserter(shape_idx_vals));
ShapeIndex shape_index(shape_idx_vals);
allocation->set_entry_computation_parameter(
alloc_proto.parameter_number(), shape_index, false);
}
for (const auto& assignee : alloc_proto.assigned()) {
HloValue::Id logical_buffer_id = assignee.logical_buffer_id();
const auto& buffer_val = id_to_logical_buffer[logical_buffer_id];
buffer_assignment->AddAssignment(allocation, *buffer_val,
assignee.offset(), assignee.size());
}
CHECK_EQ(allocation->maybe_live_out(), alloc_proto.maybe_live_out())
<< "Dataflow analysis differs from proto.";
}
TF_RET_CHECK(proto.logical_buffers_size() ==
buffer_assignment->allocation_index_for_value_.size());
for (auto& logical_buffer_proto : proto.logical_buffers()) {
TF_RET_CHECK(buffer_assignment->HasAllocation(
*id_to_logical_buffer[logical_buffer_proto.id()]));
}
return buffer_assignment;
}
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::Run(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
bool allocate_buffers_for_constants, BufferAssigner::Colorer colorer,
std::optional<BufferAssigner::MustNotLiveOut> must_not_live_out,
HloDataflowAnalysis::CanShareBuffer can_share_buffer,
std::unique_ptr<PresetAssignments> preset_assignments,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
std::optional<BufferValue::Color> temp_buffer_color) {
BufferAssigner assigner(allocate_buffers_for_constants, std::move(colorer),
must_not_live_out, std::move(preset_assignments));
return assigner.CreateAssignment(
module, std::move(hlo_ordering), std::move(buffer_size),
std::move(color_alignment), std::move(can_share_buffer), private_stacks,
heap_buffer_interval_compare, isolation_options, temp_buffer_color);
}
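// Returns true if the two values' total-order live ranges overlap in a way
// that forbids sharing an allocation. Abutting ranges (one ends exactly
// where the other starts) may still share when the later value's defining
// instruction is not a copy and can reuse the earlier buffer in place, per
// CanShareOperandBufferWithUser.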
bool BufferAssigner::LiveRangeInterferes(const HloValue* buffer1,
const HloValue* buffer2,
BufferAssignment* assignment) {
  CHECK(assignment->hlo_live_range().total_order_scheduled());
const HloLiveRange& hlo_live_range = assignment->hlo_live_range();
const auto& buffer_live_ranges = hlo_live_range.buffer_live_ranges();
auto live_range_it1 = buffer_live_ranges.find(buffer1);
CHECK(live_range_it1 != buffer_live_ranges.end())
<< "Buffer doesn't have a proper live range:" << buffer1->ToString();
auto live_range_it2 = buffer_live_ranges.find(buffer2);
CHECK(live_range_it2 != buffer_live_ranges.end())
<< "Buffer doesn't have a proper live range:" << buffer2->ToString();
auto can_share_as_operand =
[&assignment](const HloValue* user_value, const HloValue* operand_value,
const HloLiveRange::TimeBound& operand_live_range) {
HloPosition operand_end_position = operand_live_range.end_position;
return user_value->instruction()->opcode() != HloOpcode::kCopy &&
user_value->instruction()->IsUserOf(
operand_end_position.instruction) &&
assignment->dataflow_analysis().CanShareOperandBufferWithUser(
operand_end_position.instruction, operand_end_position.index,
user_value->instruction(), user_value->index());
};
const auto& live_range_1 = live_range_it1->second;
const auto& live_range_2 = live_range_it2->second;
if (!(live_range_1.start > live_range_2.end ||
live_range_2.start > live_range_1.end)) {
if (live_range_1.end == live_range_2.start) {
auto operand_value = buffer1;
auto user_value = buffer2;
if (!can_share_as_operand(user_value, operand_value, live_range_1)) {
VLOG(4) << "End of live range of " << buffer1->ToShortString()
<< " is equal to the start of live range of "
<< buffer2->ToShortString() << ", buffer cannot be shared.";
return true;
}
} else if (live_range_2.end == live_range_1.start) {
auto operand_value = buffer2;
auto user_value = buffer1;
if (!can_share_as_operand(user_value, operand_value, live_range_2)) {
VLOG(4) << "End of live range of " << buffer2->ToShortString()
<< " is equal to the start of live range of "
<< buffer1->ToShortString() << ", buffer cannot be shared.";
return true;
}
} else {
VLOG(4) << "Can't assign: assignee " << *buffer1 << " may interfere with "
<< *buffer2;
VLOG(4) << "assigned_buffer.start: " << live_range_1.start;
VLOG(4) << "assigned_buffer.end: " << live_range_1.end;
VLOG(4) << "live_range_2.start" << live_range_2.start;
VLOG(4) << "live_range_2.end" << live_range_2.end;
return true;
}
}
return false;
}
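// Attempts to place `hlo_buffer` inside an existing allocation. Returns
// false on color or size mismatch, read-only or non-reusable allocations,
// must-not-live-out violations, live-range/ordering interference with any
// already-assigned value, copy-user hazards, or a live-out buffer whose size
// differs from the allocation's. On success, records the assignment and
// returns true.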
bool BufferAssigner::MaybeAssignBuffer(BufferAllocation* allocation,
const HloBuffer& hlo_buffer,
BufferAssignment* assignment) {
CHECK(!assignment->HasAllocation(hlo_buffer))
<< "buffer " << hlo_buffer << " already has an allocation assigned.";
VLOG(4) << "Trying to assign " << hlo_buffer << " size "
<< assignment->HloBufferSize(hlo_buffer)
<< " to allocation: " << *allocation;
if (hlo_buffer.color() != allocation->color()) {
VLOG(4) << "Can't assign: buffer has color " << hlo_buffer.color()
<< " and allocation has color " << allocation->color() << ".";
return false;
}
if (assignment->HloBufferSize(hlo_buffer) > allocation->size()) {
VLOG(4) << "Can't assign: buffer is larger than allocation ("
<< assignment->HloBufferSize(hlo_buffer) << " > "
<< allocation->size() << ")";
return false;
}
if (allocation->is_readonly()) {
VLOG(4) << "Can't assign: allocation is readonly";
return false;
}
if (must_not_live_out_.has_value()) {
if (allocation->maybe_live_out()) {
for (const HloValue* value : hlo_buffer.values()) {
if ((*must_not_live_out_)(assignment->alias_analysis(),
value->instruction(), value->index())) {
VLOG(4) << "Can't assign: " << value->instruction()->ToString()
<< " cannot live out of the module";
return false;
}
}
}
if (assignment->alias_analysis().BufferLivesOut(hlo_buffer)) {
for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
const HloValue* value = buffer_offset_size.first;
if ((*must_not_live_out_)(assignment->alias_analysis(),
value->instruction(), value->index())) {
VLOG(4) << "Can't assign: " << value->instruction()
<< " cannot live out of the module";
return false;
}
}
}
}
if (!allocation->is_reusable()) {
VLOG(4) << "Can't assign: allocation is not reusable";
return false;
}
for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
    const HloValue& assigned_buffer = *buffer_offset_size.first;
for (const HloValue* new_value : hlo_buffer.values()) {
if (assignment->hlo_live_range().total_order_scheduled()) {
if (LiveRangeInterferes(new_value, &assigned_buffer, assignment)) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " live range interferes with "
<< new_value->ToShortString();
return false;
}
} else if (assignment->hlo_ordering().MayInterfere(
assigned_buffer, *new_value,
assignment->dataflow_analysis())) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " may interfere with " << new_value->ToShortString();
return false;
}
if (new_value->instruction()->opcode() == HloOpcode::kCopy) {
for (const HloPosition& assigned_buffer_position :
assigned_buffer.positions()) {
if (new_value->instruction()->IsUserOf(
assigned_buffer_position.instruction)) {
VLOG(4) << "Can't assign: assignee " << assigned_buffer
<< " is used at copy instruction "
<< new_value->ToShortString();
return false;
}
}
}
}
}
if (assignment->alias_analysis().BufferLivesOut(hlo_buffer) &&
allocation->size() != assignment->HloBufferSize(hlo_buffer)) {
VLOG(4) << "Can't assign: buffer " << hlo_buffer
<< "is live out and size not the same as allocation";
return false;
}
assignment->AddAssignment(allocation, hlo_buffer, 0,
assignment->HloBufferSize(hlo_buffer));
return true;
}
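// Assigns a single HloBuffer to an allocation. Constants and entry-computation
// parameters always get dedicated allocations, as do thread-local and
// tuple-shaped buffers. Other buffers first try to reuse an operand's
// allocation, then a previously created allocation (newest first). Temp
// buffers whose computations all have a sequential order are deferred to heap
// simulation via `buffers_to_assign_sequentially`; anything left over gets a
// fresh allocation.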
absl::Status BufferAssigner::AssignSingleHloBuffer(
const HloBuffer* hlo_buffer, bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
std::vector<BufferAllocation::Index>* allocation_indices,
BufferAssignment* assignment) {
const int64_t buffer_size = assignment->HloBufferSize(*hlo_buffer);
for (const HloValue* value : hlo_buffer->values()) {
if (value->instruction()->opcode() == HloOpcode::kConstant) {
if (allocate_buffers_for_constants_) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_constant(true);
VLOG(3) << "New allocation #" << allocation->index() << " for constant "
<< *hlo_buffer << " value ptr: " << value;
      } else {
        VLOG(3) << "Not allocating buffer for constant";
      }
return absl::OkStatus();
}
const HloInstruction* instruction = value->instruction();
const bool is_entry_parameter =
instruction->opcode() == HloOpcode::kParameter &&
instruction->parent() == instruction->GetModule()->entry_computation();
if (is_entry_parameter) {
bool parameter_has_alias =
assignment->module().input_output_alias_config().ParameterHasAlias(
instruction->parameter_number(), value->index());
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_entry_computation_parameter(
instruction->parameter_number(), value->index(), parameter_has_alias);
if (parameter_has_alias) {
allocation_indices->push_back(allocation->index());
}
VLOG(3) << "New allocation #" << allocation->index()
<< " marked as entry computation parameter: " << *hlo_buffer;
return absl::OkStatus();
}
}
if (is_thread_local) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_is_thread_local(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for thread-local: " << *hlo_buffer;
return absl::OkStatus();
}
for (const HloValue* value : hlo_buffer->values()) {
if (value->shape().IsTuple()) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation->set_is_tuple(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for tuple-shaped buffer: " << *hlo_buffer;
return absl::OkStatus();
}
if (value->IsTopLevel() && !value->IsTuple()) {
const HloInstruction* instruction = value->instruction();
for (auto* operand : instruction->operands()) {
for (const auto& operand_slice :
assignment->GetAllSlices(operand, {})) {
BufferAllocation* allocation =
assignment->GetMutableAllocation(operand_slice.index());
if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
VLOG(3) << "Reusing (operand) allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
return absl::OkStatus();
}
}
}
}
}
for (int allocation_index = allocation_indices->size() - 1;
allocation_index >= 0; allocation_index--) {
BufferAllocation* allocation = assignment->GetMutableAllocation(
allocation_indices->at(allocation_index));
if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
VLOG(3) << "Reusing allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
return absl::OkStatus();
}
}
if (!assignment->HasAllocation(*hlo_buffer) &&
!assignment->alias_analysis().BufferLivesOut(*hlo_buffer)) {
bool all_computations_have_sequential_order = true;
for (const HloValue* hlo_value : hlo_buffer->values()) {
HloComputation* computation = hlo_value->instruction()->parent();
const bool has_sequential_order =
assignment->hlo_ordering().SequentialOrder(*computation) != nullptr;
all_computations_have_sequential_order &= has_sequential_order;
}
if (all_computations_have_sequential_order) {
for (const HloValue* hlo_value : hlo_buffer->values()) {
HloComputation* computation = hlo_value->instruction()->parent();
(*buffers_to_assign_sequentially)[computation].insert(hlo_value);
VLOG(3) << "Delaying assignment of temp buffer: " << *hlo_value;
}
return absl::OkStatus();
}
}
if (!assignment->HasAllocation(*hlo_buffer)) {
BufferAllocation* allocation =
assignment->NewAllocation(*hlo_buffer, buffer_size);
allocation_indices->push_back(allocation->index());
VLOG(3) << "New allocation #" << allocation->index()
<< " for: " << *hlo_buffer;
}
TF_RET_CHECK(assignment->HasAllocation(*hlo_buffer));
return absl::OkStatus();
}
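// Assigns buffers defined in the given computations. Preset-assigned buffers
// are handled first and skipped here; the remaining buffers are sorted so that
// larger buffers come first, then live-out buffers, then buffers defined
// earlier in a reverse-post-order walk of the call graph, making the more
// constrained assignments happen before the less constrained ones.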
absl::Status BufferAssigner::AssignBuffersForComputations(
const std::vector<const HloComputation*>& computations,
bool is_thread_local,
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>*
buffers_to_assign_sequentially,
BufferAssignment* assignment) {
if (computations.empty()) {
return absl::OkStatus();
}
std::vector<const HloBuffer*> sorted_buffers;
absl::flat_hash_set<const HloBuffer*> preset_assigned_buffers;
TF_RETURN_IF_ERROR(AssignPresetBuffers(&preset_assigned_buffers, assignment));
const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
for (const HloBuffer& buffer : alias_analysis.buffers()) {
if (preset_assigned_buffers.find(&buffer) !=
preset_assigned_buffers.end()) {
VLOG(3) << "Skip allocation for buffer: " << buffer;
continue;
}
TF_RET_CHECK(!buffer.values().empty());
const HloComputation* comp = buffer.values()[0]->instruction()->parent();
if (absl::c_linear_search(computations, comp)) {
sorted_buffers.push_back(&buffer);
}
}
flat_hash_map<const HloInstruction*, int> post_order_position;
int position = 0;
std::vector<const HloComputation*> reverse_post_order_computations;
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(computations[0]->parent());
TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node) {
if (absl::c_linear_search(computations, node.computation())) {
reverse_post_order_computations.push_back(node.computation());
}
return absl::OkStatus();
}));
absl::c_reverse(reverse_post_order_computations);
for (auto* computation : reverse_post_order_computations) {
for (auto* instruction : computation->MakeInstructionPostOrder()) {
post_order_position.emplace(instruction, position);
position++;
}
}
HloSchedule schedule(&assignment->module());
for (const HloComputation* computation : computations) {
const HloInstructionSequence* instruction_sequence =
assignment->hlo_ordering().SequentialOrder(*computation);
const bool has_sequential_order = instruction_sequence != nullptr;
if (has_sequential_order && buffers_to_assign_sequentially != nullptr) {
buffers_to_assign_sequentially->emplace(computation,
flat_hash_set<const HloValue*>());
schedule.set_sequence(computation, *instruction_sequence);
}
}
absl::c_sort(
sorted_buffers, [&post_order_position, &alias_analysis, assignment](
const HloBuffer* a, const HloBuffer* b) {
const int64_t a_size = assignment->HloBufferSize(*a);
const int64_t b_size = assignment->HloBufferSize(*b);
if (a_size != b_size) {
return a_size > b_size;
}
const bool a_live_out = alias_analysis.BufferLivesOut(*a);
const bool b_live_out = alias_analysis.BufferLivesOut(*b);
if (a_live_out != b_live_out) {
return a_live_out;
}
auto compare = [&post_order_position](const HloValue* value1,
const HloValue* value2) {
return post_order_position.at(value1->instruction()) <
post_order_position.at(value2->instruction());
};
const HloValue* a_min = *absl::c_min_element(a->values(), compare);
const HloValue* b_min = *absl::c_min_element(b->values(), compare);
if (post_order_position.at(a_min->instruction()) <
post_order_position.at(b_min->instruction())) {
return true;
} else if (post_order_position.at(a_min->instruction()) >
post_order_position.at(b_min->instruction())) {
return false;
}
return a->id() < b->id();
});
std::vector<BufferAllocation::Index> allocation_indices;
for (const HloBuffer* buffer : sorted_buffers) {
VLOG(3) << "=================================================";
VLOG(3) << "Assigning buffer for " << *buffer;
TF_RETURN_IF_ERROR(AssignSingleHloBuffer(buffer, is_thread_local,
buffers_to_assign_sequentially,
&allocation_indices, assignment));
}
return absl::OkStatus();
}
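// Partitions `buffers` into sets keyed by buffer color.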
flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByColor(
const flat_hash_set<const HloValue*>& buffers) const {
flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>> color_map;
for (auto buffer : buffers) {
color_map[buffer->color()].insert(buffer);
}
return color_map;
}
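// Partitions `buffers` by the private-stack computation each value's defining
// instruction is nested in; every value must belong to exactly one of the
// given computations.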
absl::flat_hash_map<const HloComputation*, absl::flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByPrivateStackComputation(
const absl::flat_hash_set<const HloValue*>& buffers,
absl::Span<const HloComputation* const> private_stack_computations,
const CallGraph& call_graph) const {
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloValue*>>
computation_map;
for (const HloValue* value : buffers) {
bool found_computation = false;
for (const HloComputation* computation : private_stack_computations) {
if (call_graph.InstructionIsNestedIn(value->instruction(), computation)) {
found_computation = true;
computation_map[computation].insert(value);
break;
}
}
CHECK(found_computation);
}
return computation_map;
}
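// Materializes preset assignments (e.g. those produced by memory space
// assignment): one empty allocation is created per color, every preset chunk
// is assigned at its recorded offset and size, and the covered buffers are
// reported through `assigned_buffers` so that later passes skip them.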
absl::Status BufferAssigner::AssignPresetBuffers(
absl::flat_hash_set<const HloBuffer*>* assigned_buffers,
BufferAssignment* assignment) {
if (!preset_assignments_) {
return absl::OkStatus();
}
absl::flat_hash_map<LogicalBuffer::Color, BufferAllocation*>
preset_allocations;
for (auto& color_and_info : preset_assignments_->assignment_informations()) {
LogicalBuffer::Color color(color_and_info.first);
auto inserted = preset_allocations.emplace(
color,
assignment->NewEmptyAllocation(color_and_info.second.size, color));
BufferAllocation* inserted_allocation = inserted.first->second;
inserted_allocation->AddHeapTrace(
color_and_info.second.heap_simulator_trace);
VLOG(3) << "Created preset buffer allocation "
<< inserted_allocation->index()
<< ", color: " << inserted_allocation->color()
<< ", size: " << inserted_allocation->size();
}
const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
for (auto& position_and_chunk : preset_assignments_->chunks()) {
const HloPosition& defining_position = position_and_chunk.first;
const HloBuffer& buffer = alias_analysis.GetUniqueBufferAt(
defining_position.instruction, defining_position.index);
for (const HloValue* value : buffer.values()) {
VLOG(3) << "Preset allocation for value: " << value->ToShortString();
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
auto preset_allocations_iter = preset_allocations.find(value->color());
CHECK(preset_allocations_iter != preset_allocations.end())
<< "No preset value allocation for color " << value->color()
<< " for " << value->ToShortString() << " found.";
preset_allocations_iter->second->AddAssignment(*value, chunk.offset,
chunk.size);
}
assigned_buffers->insert(&buffer);
}
preset_assignments_ = {};
return absl::OkStatus();
}
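// Runs heap simulation for the buffers that were deferred to sequential
// assignment, one simulation per color (in sorted color order). In
// whole-module mode a single schedule covers all sequentially ordered
// computations, with private-stack computations simulated separately;
// otherwise each computation is simulated on its own.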
absl::Status BufferAssigner::AssignBuffersWithSequentialOrdering(
const flat_hash_map<const HloComputation*, flat_hash_set<const HloValue*>>&
buffers_to_assign_sequentially,
bool run_whole_module_heap_simulation, BufferAssignment* assignment,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options) {
const HloOrdering& hlo_ordering = assignment->hlo_ordering();
auto get_heap_algorithm =
[&](int64_t alignment) -> std::unique_ptr<HeapAlgorithm<HloValue>> {
if (heap_buffer_interval_compare) {
return std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kCustom,
heap_buffer_interval_compare);
}
auto algorithms = std::make_unique<
std::vector<std::unique_ptr<HeapAlgorithm<HloValue>>>>();
algorithms->push_back(
std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kSpatial));
algorithms->push_back(
std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
assignment->multiheap_size_constraint_per_heap(), alignment,
GlobalDecreasingSizeBestFitHeap<HloValue>::kTemporal));
return std::make_unique<ChooseBestHeapAlgorithm<HloValue>>(
std::move(algorithms));
};
if (run_whole_module_heap_simulation) {
VLOG(1) << "Running whole-module heap simulation";
HloSchedule schedule(&assignment->module());
flat_hash_set<const HloValue*> all_buffers_to_assign;
for (const auto& pair : buffers_to_assign_sequentially) {
const HloComputation* computation = pair.first;
const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*computation);
CHECK(instruction_sequence != nullptr) << computation->name();
schedule.set_sequence(computation, *instruction_sequence);
all_buffers_to_assign.insert(buffers_to_assign.begin(),
buffers_to_assign.end());
}
auto color_map = SplitBuffersByColor(all_buffers_to_assign);
std::vector<LogicalBuffer::Color> sorted_colors;
sorted_colors.reserve(color_map.size());
for (auto& single_colored_set : color_map) {
auto color = single_colored_set.first;
sorted_colors.emplace(sorted_colors.end(), color);
}
absl::c_sort(sorted_colors);
for (auto color : sorted_colors) {
VLOG(2) << "Simulating heap for color " << color;
int64_t alignment = assignment->color_alignment_(color);
HeapSimulator::Options options;
options.alloc_constants = allocate_buffers_for_constants_;
auto private_stacks_it = private_stacks.find(color);
if (private_stacks_it != private_stacks.end()) {
auto computation_map = SplitBuffersByPrivateStackComputation(
color_map[color], private_stacks_it->second,
assignment->alias_analysis().dataflow_analysis().call_graph());
for (const HloComputation* private_stack_computation :
private_stacks_it->second) {
VLOG(2) << "private stack computation: "
<< private_stack_computation->name();
auto computation_map_it =
computation_map.find(private_stack_computation);
CHECK(computation_map_it != computation_map.end());
options.buffers_to_assign = &computation_map_it->second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*private_stack_computation);
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(
get_heap_algorithm(alignment), *private_stack_computation,
*instruction_sequence, assignment->alias_analysis(),
assignment->buffer_size_, &schedule, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
} else {
options.buffers_to_assign = &color_map[color];
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(get_heap_algorithm(alignment),
assignment->module(), schedule,
assignment->alias_analysis(),
assignment->buffer_size_, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
}
} else {
VLOG(1) << "Running per-computation heap simulation";
for (const auto& pair : buffers_to_assign_sequentially) {
const HloComputation* computation = pair.first;
const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
const HloInstructionSequence* instruction_sequence =
hlo_ordering.SequentialOrder(*computation);
CHECK(instruction_sequence != nullptr) << computation->name();
auto color_map = SplitBuffersByColor(buffers_to_assign);
std::vector<LogicalBuffer::Color> sorted_colors;
sorted_colors.reserve(color_map.size());
for (auto& single_colored_set : color_map) {
auto color = single_colored_set.first;
sorted_colors.emplace(sorted_colors.end(), color);
}
absl::c_sort(sorted_colors);
for (auto color : sorted_colors) {
VLOG(2) << "Simulating heap for color " << color;
int64_t alignment = assignment->color_alignment_(color);
HeapSimulator::Options options;
options.buffers_to_assign = &color_map[color];
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(get_heap_algorithm(alignment), *computation,
*instruction_sequence,
assignment->alias_analysis(),
assignment->buffer_size_, options));
AssignBuffersFromHeapSimulator(result, assignment, color,
isolation_options);
}
}
}
return absl::OkStatus();
}
namespace {
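// Replays `heap_trace` to find the set of values in `allocation` that are
// simultaneously live at the point of peak memory usage. The first pass over
// the events computes the peak live size; the second pass stops at the first
// event where the live size reaches that peak and returns the values live
// there, sorted by id. SHARE_WITH events are counted once per canonical
// buffer.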
std::vector<const HloValue*> ComputePeakMemoryLogicalBuffers(
const BufferAllocation& allocation, const HeapSimulatorTrace& heap_trace) {
absl::flat_hash_map<BufferValue::Id, const HloValue*> id_to_value;
absl::flat_hash_map<const HloValue*, int64_t> buffer_sizes;
for (const auto& pair : allocation.assigned_buffers()) {
const HloValue* value = pair.first;
const BufferAllocation::OffsetSize& offset_size = pair.second;
id_to_value[value->id()] = value;
buffer_sizes[value] = offset_size.size;
}
VLOG(1) << "Compute peak memory logical buffers";
absl::flat_hash_map<int64_t, int> num_outstanding_shared_buffers;
absl::flat_hash_map<int64_t, int64_t> shared_canonical_ids;
absl::flat_hash_map<int64_t, int64_t> allocated_sizes;
auto memory_delta = [&](const HeapSimulatorTrace::Event& event) -> int64_t {
const HloValue* buffer = id_to_value.at(event.buffer_id());
const int64_t buffer_size = buffer_sizes.at(buffer);
if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
num_outstanding_shared_buffers[event.buffer_id()] = 1;
allocated_sizes[event.buffer_id()] = buffer_size;
return buffer_size;
} else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
shared_canonical_ids[event.buffer_id()] = event.share_with_canonical_id();
if (++num_outstanding_shared_buffers[event.share_with_canonical_id()] ==
1) {
allocated_sizes[event.buffer_id()] = buffer_size;
return buffer_size;
}
allocated_sizes[event.buffer_id()] = 0;
return 0;
} else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
auto shared_canonical_id_it =
shared_canonical_ids.find(event.buffer_id());
int64_t buffer_id = (shared_canonical_id_it == shared_canonical_ids.end())
? event.buffer_id()
: shared_canonical_id_it->second;
--num_outstanding_shared_buffers[buffer_id];
return -1 * allocated_sizes[event.buffer_id()];
}
LOG(FATAL) << "Unknown event kind: " << event.kind();
};
int64_t max_live_size = 0;
int64_t live_size = 0;
for (const auto& event : heap_trace.events()) {
if (!id_to_value.contains(event.buffer_id())) {
continue;
}
live_size += memory_delta(event);
if (max_live_size < live_size) {
max_live_size = live_size;
}
}
absl::flat_hash_set<const HloValue*> live_values;
live_size = 0;
num_outstanding_shared_buffers.clear();
for (const auto& event : heap_trace.events()) {
if (!id_to_value.contains(event.buffer_id())) {
continue;
}
const HloValue* value = id_to_value.at(event.buffer_id());
int64_t delta = memory_delta(event);
if (delta > 0) {
InsertOrDie(&live_values, value);
} else if (delta < 0) {
CHECK(ContainsKey(live_values, value));
live_values.erase(value);
}
live_size += delta;
if (live_size == max_live_size) {
break;
}
}
CHECK_EQ(live_size, max_live_size);
std::vector<const HloValue*> live_values_vector;
live_values_vector.insert(live_values_vector.end(), live_values.begin(),
live_values.end());
absl::c_sort(live_values_vector, [](const HloValue* a, const HloValue* b) {
return a->id() < b->id();
});
VLOG(4) << "Peak memory buffer:";
for (auto value : live_values_vector) {
VLOG(4) << " " << value->ToString();
}
return live_values_vector;
}
}  // namespace
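// When isolation options are given, rewrites chunk offsets for the configured
// colors: up to `isolation_fuel` values (in `hlo_value_compare` order) are
// relocated past the end of the simulated heap with padding, and the remaining
// values are shifted by the base offset. This gives the selected values
// disjoint address ranges, which helps track down buffer-reuse issues.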
void BufferAssigner::IsolateHeapBuffers(
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
const BufferAssignment* assignment, LogicalBuffer::Color color,
HeapSimulator::Result<HloValue>& result) const {
if (!isolation_options) {
return;
}
result.heap_size = 0;
for (HeapSimulator::HeapResult<HloValue>& heap_result : result.heap_results) {
if (absl::c_find(isolation_options->config.isolation_colors(), color) !=
isolation_options->config.isolation_colors().end()) {
VLOG(1) << "Isolating color: " << color;
int64_t alignment = assignment->color_alignment_(color);
std::vector<const HloValue*> sorted_values;
sorted_values.reserve(heap_result.chunk_map.size());
for (const auto& [value, chunk] : heap_result.chunk_map) {
sorted_values.push_back(value);
}
absl::c_sort(sorted_values, isolation_options->hlo_value_compare);
int64_t isolation_offset =
RoundUpTo(isolation_options->config.base_offset_bytes() +
heap_result.heap_size +
isolation_options->config.isolation_padding_bytes(),
alignment);
int64_t value_index;
for (value_index = 0;
value_index < std::min(static_cast<int64_t>(sorted_values.size()),
isolation_options->config.isolation_fuel());
++value_index) {
const HloValue* value = sorted_values[value_index];
HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);
VLOG(1) << "Isolating " << value->ToShortString() << " from "
<< chunk.offset << " to " << isolation_offset;
chunk.offset = isolation_offset;
isolation_offset += RoundUpTo(
chunk.size + isolation_options->config.isolation_padding_bytes(),
alignment);
}
      for (; value_index < static_cast<int64_t>(sorted_values.size());
           ++value_index) {
const HloValue* value = sorted_values[value_index];
HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);
int64_t new_offset = RoundUpTo(
chunk.offset + isolation_options->config.base_offset_bytes(),
alignment);
VLOG(1) << "Not isolating " << value->ToShortString() << ", from "
<< chunk.offset << " to " << new_offset;
chunk.offset = new_offset;
}
heap_result.heap_size = isolation_offset;
}
result.heap_size += heap_result.heap_size;
}
}
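// Converts a heap simulator result into allocations: one allocation per heap
// result, with every value placed at its simulated chunk offset. Also
// accumulates fragmentation statistics and records the peak buffers and the
// heap trace on the allocation for debugging.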
void BufferAssigner::AssignBuffersFromHeapSimulator(
HeapSimulator::Result<HloValue>& result, BufferAssignment* assignment,
BufferValue::Color color,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options) {
IsolateHeapBuffers(isolation_options, assignment, color, result);
if (assignment->stats_.preallocated_temp_fragmentation_bytes == -1) {
assignment->stats_.preallocated_temp_fragmentation_bytes =
result.fragmentation_size;
} else {
assignment->stats_.preallocated_temp_fragmentation_bytes +=
result.fragmentation_size;
}
VLOG(1) << "Result size from heap simulator: " << result.heap_size;
for (const HeapSimulator::HeapResult<HloValue>& heap_result :
result.heap_results) {
BufferAllocation* allocation =
assignment->NewEmptyAllocation(heap_result.heap_size, color);
for (const auto& [value, chunk] : heap_result.chunk_map) {
assignment->AddAssignment(allocation, *value, chunk.offset, chunk.size);
}
allocation->peak_buffers_ =
ComputePeakMemoryLogicalBuffers(*allocation, result.debug_trace);
XLA_VLOG_LINES(2, allocation->ToString());
allocation->AddHeapTrace(result.debug_trace);
}
}
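// End-to-end driver: runs alias and live-range analysis, colors the values,
// assigns buffers for global computations (deferring temp buffers to heap
// simulation when every global computation is sequentially ordered), assigns
// thread-local computations, marks maybe-live-out allocations, and finally
// combines temp allocations.
//
// A minimal usage sketch via the public BufferAssigner::Run entry point,
// mirroring how the tests below invoke it (the size and alignment callbacks
// are illustrative):
//
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<BufferAssignment> assignment,
//       BufferAssigner::Run(
//           module, std::make_unique<DependencyHloOrdering>(module),
//           backend.compiler()->BufferSizeBytesFunction(),
//           [](LogicalBuffer::Color) { return /*alignment=*/1; },
//           /*allocate_buffers_for_constants=*/true));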
absl::StatusOr<std::unique_ptr<BufferAssignment>>
BufferAssigner::CreateAssignment(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
BufferValue::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
HloDataflowAnalysis::CanShareBuffer can_share_buffer,
const PrivateStacks& private_stacks,
GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
heap_buffer_interval_compare,
std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
std::optional<BufferValue::Color> temp_buffer_color) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer));
HloSchedule schedule(module);
for (const HloComputation* computation : module->computations()) {
const HloInstructionSequence* instruction_sequence =
hlo_ordering->SequentialOrder(*computation);
const bool has_sequential_order = instruction_sequence != nullptr;
if (has_sequential_order) {
schedule.set_sequence(computation, *instruction_sequence);
}
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, *alias_analysis,
                                        module->entry_computation(),
                                        /*module_scoped_analysis=*/true));
VLOG(1) << "Assigning buffers to module " << module->name();
XLA_VLOG_LINES(3, module->ToString());
XLA_VLOG_LINES(3, alias_analysis->ToString());
XLA_VLOG_LINES(3, alias_analysis->dataflow_analysis().ToString());
VLOG(1) << "Number of buffers to assign: "
<< alias_analysis->buffers().size();
std::unique_ptr<BufferAssignment> assignment(new BufferAssignment(
module, std::move(hlo_ordering), std::move(buffer_size),
std::move(color_alignment), std::move(alias_analysis),
std::move(hlo_live_range)));
TF_RETURN_IF_ERROR(
colorer_(&assignment->alias_analysis(), assignment->hlo_ordering()));
VLOG(3) << "After coloring:";
XLA_VLOG_LINES(3,
assignment->alias_analysis().dataflow_analysis().ToString());
std::vector<const HloComputation*> thread_local_computations;
std::vector<const HloComputation*> global_computations;
TF_RETURN_IF_ERROR(GatherComputationsByAllocationType(
module, &thread_local_computations, &global_computations));
flat_hash_map<const HloComputation*, flat_hash_set<const HloValue*>>
buffers_to_assign_sequentially;
TF_RETURN_IF_ERROR(AssignBuffersForComputations(
global_computations,
      /*is_thread_local=*/false, &buffers_to_assign_sequentially,
assignment.get()));
const bool run_whole_module_heap_simulation =
buffers_to_assign_sequentially.size() == global_computations.size();
VLOG(2) << "Running whole module heap simulation: "
<< run_whole_module_heap_simulation;
const int32_t multiheap_size_constraint_per_heap =
module->config().debug_options().xla_multiheap_size_constraint_per_heap();
VLOG(2) << "Multiheap per heap size limit: "
<< multiheap_size_constraint_per_heap;
TF_RETURN_IF_ERROR(AssignBuffersWithSequentialOrdering(
buffers_to_assign_sequentially, run_whole_module_heap_simulation,
assignment.get(), private_stacks, heap_buffer_interval_compare,
isolation_options));
std::vector<const HloComputation*> thread_local_computations_no_fusion;
for (auto* computation : thread_local_computations) {
TF_RET_CHECK(computation != module->entry_computation());
if (computation->IsFusionComputation()) {
continue;
}
thread_local_computations_no_fusion.push_back(computation);
}
TF_RETURN_IF_ERROR(AssignBuffersForComputations(
thread_local_computations_no_fusion,
      /*is_thread_local=*/true,
      /*buffers_to_assign_sequentially=*/nullptr, assignment.get()));
for (const HloBuffer* buffer :
assignment->alias_analysis().LiveOutBuffers()) {
VLOG(3) << "maybe_live_out LogicalBuffer: " << *buffer;
if (assignment->HasAllocation(*buffer)) {
BufferAllocation* alloc =
assignment->GetMutableAssignedAllocation(*buffer);
alloc->set_maybe_live_out(true);
VLOG(3) << "maybe_live_out BufferAllocation: " << *alloc;
}
}
absl::flat_hash_set<BufferValue::Color> private_stack_colors;
for (const auto& [color, computations] : private_stacks) {
private_stack_colors.insert(color);
}
assignment->CombineTempAllocations(private_stack_colors, temp_buffer_color);
XLA_VLOG_LINES(2, assignment->ToString());
TF_RETURN_IF_ERROR(assignment->ComputeSummaryStats());
XLA_VLOG_LINES(1, assignment->GetStats().ToString());
VLOG(1) << "Buffer assignment done.";
return std::move(assignment);
}
}  // namespace xla
#include "xla/service/buffer_assignment.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::PresetAssignments;
using ::testing::UnorderedElementsAre;
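// DFS visitor that records every instruction it visits; used by
// GetInstructions below to collect the instruction list rooted at a given
// instruction.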
class InstructionListVisitor : public DfsHloVisitorWithDefault {
public:
explicit InstructionListVisitor(const HloInstruction* root) : root_(root) {}
absl::Status DefaultAction(HloInstruction* hlo) override {
instructions_.push_back(hlo);
VLOG(0) << "List instruction " << hlo->ToString();
return absl::OkStatus();
}
std::vector<const HloInstruction*> GetInstructions() { return instructions_; }
private:
const HloInstruction* root_;
std::vector<const HloInstruction*> instructions_;
InstructionListVisitor(const InstructionListVisitor&) = delete;
InstructionListVisitor& operator=(const InstructionListVisitor&) = delete;
};
const std::vector<const HloInstruction*> GetInstructions(HloInstruction* root) {
InstructionListVisitor main_list(root);
TF_CHECK_OK(root->Accept(&main_list));
return main_list.GetInstructions();
}
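// Test fixture with helpers for running buffer assignment under different
// orderings, colorers, and options, plus accessors for the resulting
// allocations and a set of commonly used shapes.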
class BufferAssignmentTest : public HloTestBase {
protected:
~BufferAssignmentTest() override {}
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true)
.value();
}
absl::StatusOr<std::unique_ptr<BufferAssignment>> ConvertToProtoAndBack(
const BufferAssignment* buffers, const HloModule* module) {
auto proto = buffers->ToProto();
return BufferAssignment::FromProto(
proto, module, backend().compiler()->BufferSizeBytesFunction(),
nullptr);
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithSequentialOrdering(
HloModule* module, int64_t alignment = 1,
BufferAssigner::Colorer colorer = BufferAssigner::DefaultColorer(),
const BufferAssigner::PrivateStacks& private_stacks = {},
std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true, colorer,
               /*must_not_live_out=*/std::nullopt, /*can_share_buffer=*/nullptr,
               /*preset_assignments=*/{}, private_stacks,
               /*heap_buffer_interval_compare=*/nullptr, isolation_options)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants(
HloModule* module, int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/false)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersReuseForAdd(
HloModule* module, int64_t alignment = 1) {
auto must_not_live_out = [](const HloAliasAnalysis& alias_analysis,
const HloInstruction* instruction,
const ShapeIndex&) {
return instruction->opcode() == HloOpcode::kAdd;
};
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/false,
               BufferAssigner::DefaultColorer(),
               /*must_not_live_out=*/must_not_live_out)
.value();
}
std::unique_ptr<BufferAssignment> RunColoredBufferAssignment(
HloModule* module, BufferAssigner::Colorer colorer,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true, std::move(colorer))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithInstructionSequence(
HloModule* module, absl::Span<HloInstruction* const> instruction_sequence,
int64_t alignment = 1) {
HloSchedule schedule(module);
schedule.set_sequence(module->entry_computation(), instruction_sequence);
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true)
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithPresetAssignments(
HloModule* module, std::unique_ptr<PresetAssignments> preset_assignments,
int64_t alignment = 1) {
return BufferAssigner::Run(
module, std::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
               /*allocate_buffers_for_constants=*/true,
               BufferAssigner::DefaultColorer(),
               /*must_not_live_out=*/std::nullopt,
               /*can_share_buffer=*/nullptr, std::move(preset_assignments))
.value();
}
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithIsolationOptions(
HloModule* module, std::optional<BufferAssignment::BufferIsolationOptions>
isolation_options = std::nullopt) {
return BufferAssigner::Run(
module,
std::make_unique<SequentialHloOrdering>(module->schedule()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
               /*allocate_buffers_for_constants=*/true,
               BufferAssigner::DefaultColorer(),
               /*must_not_live_out=*/std::nullopt, /*can_share_buffer=*/nullptr,
               /*preset_assignments=*/{}, /*private_stacks=*/{},
               /*heap_buffer_interval_compare=*/nullptr, isolation_options)
.value();
}
std::unique_ptr<HloComputation> BuildMapComputationPlus1(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildReduceComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto param2 =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "y"));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, param2));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto index = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,
const4, ComparisonDirection::kLt));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileBodyComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v4_, "x"));
auto indexc = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));
auto addc = builder.AddInstruction(HloInstruction::CreateBinary(
indexc->shape(), HloOpcode::kAdd, indexc, const1));
auto indexv = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));
auto addv = builder.AddInstruction(HloInstruction::CreateBinary(
constv->shape(), HloOpcode::kAdd, indexv, constv));
builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildR0F32UnaryOpComputation(
HloOpcode opcode, const std::string& name) {
auto builder = HloComputation::Builder(name);
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
builder.AddInstruction(HloInstruction::CreateUnary(r0f32_, opcode, param));
return builder.Build();
}
const BufferAllocation& GetAssignedInputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking input: " << hlo->ToString();
const BufferAllocation& buffer =
*buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
EXPECT_EQ(hlo->parameter_number(), buffer.parameter_number());
return buffer;
}
const BufferAllocation& GetAssignedOutputAllocation(
const BufferAssignment& buffers, HloInstruction* hlo) {
LOG(INFO) << "Checking output: " << hlo->ToString();
const BufferAllocation& buffer = GetTopLevelAllocation(buffers, hlo);
return buffer;
}
const BufferAllocation& GetAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo,
const ShapeIndex& index) {
return *buffers.GetUniqueSlice(hlo, index).value().allocation();
}
const BufferAllocation& GetTopLevelAllocation(const BufferAssignment& buffers,
const HloInstruction* hlo) {
return *buffers.GetUniqueTopLevelSlice(hlo).value().allocation();
}
int64_t ValidateBuffers(
const std::vector<const HloInstruction*>& instructions,
const BufferAssignment& buffers) {
for (const HloInstruction* hlo : instructions) {
if (!buffers.HasTopLevelAllocation(hlo)) {
EXPECT_TRUE(HloOpcode::kConstant == hlo->opcode() ||
HloOpcode::kParameter == hlo->opcode());
continue;
}
}
int64_t total_size = 0;
for (auto& allocation : buffers.Allocations()) {
total_size += allocation.size();
}
return total_size;
}
Shape s32_ = ShapeUtil::MakeShape(xla::S32, {});
Shape r0f32_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
Shape f32vec10_ = ShapeUtil::MakeShape(F32, {10});
Shape f32vec100_ = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10_ = ShapeUtil::MakeShape(F32, {100, 10});
Shape t_s32_f32v4_ = ShapeUtil::MakeTupleShape({s32_, f32vec4_});
Shape t_s32_f32v10_ = ShapeUtil::MakeTupleShape({s32_, f32vec10_});
};
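// Returns true iff the top-level buffer slices assigned to the instructions in
// `a` are disjoint from those assigned to the instructions in `b`.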
static bool BuffersDistinct(const std::vector<const HloInstruction*>& a,
const std::vector<const HloInstruction*>& b,
const BufferAssignment& assignment) {
absl::flat_hash_set<BufferAllocation::Slice> a_slices;
for (const HloInstruction* instruction : a) {
if (assignment.HasTopLevelAllocation(instruction)) {
a_slices.insert(assignment.GetUniqueTopLevelSlice(instruction).value());
}
}
for (const HloInstruction* instruction : b) {
if (assignment.HasTopLevelAllocation(instruction)) {
if (a_slices.contains(
assignment.GetUniqueTopLevelSlice(instruction).value())) {
return false;
}
}
}
return true;
}
TEST_F(BufferAssignmentTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
}
}
TEST_F(BufferAssignmentTest, BufferForConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
{
auto buffers = RunBufferAssignment(module.get());
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
EXPECT_TRUE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
{
auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));
GetAssignedOutputAllocation(*buffers, add);
}
}
TEST_F(BufferAssignmentTest, HasAllocationAt) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({negate, param0, constant}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
EXPECT_EQ(buffers->HasTopLevelAllocation(tuple),
buffers->HasAllocationAt(tuple, {}));
EXPECT_EQ(buffers->HasTopLevelAllocation(negate),
buffers->HasAllocationAt(tuple, {0}));
EXPECT_EQ(buffers->HasTopLevelAllocation(param0),
buffers->HasAllocationAt(tuple, {1}));
EXPECT_EQ(buffers->HasTopLevelAllocation(constant),
buffers->HasAllocationAt(tuple, {2}));
}
TEST_F(BufferAssignmentTest, BufferForOutputConst) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
GetAssignedOutputAllocation(*buffers, copy);
}
TEST_F(BufferAssignmentTest, Basic) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, BasicToFromProto) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers_from_proto,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
const HloDataflowAnalysis& dataflow_orig = buffers_orig->dataflow_analysis();
const HloDataflowAnalysis& dataflow_proto =
buffers_from_proto->dataflow_analysis();
EXPECT_EQ(buffers_orig->Allocations().size(),
buffers_from_proto->Allocations().size());
for (BufferValue::Id id = 0; id < dataflow_orig.values().size(); id++) {
auto& orig_value = dataflow_orig.values().at(id);
if (buffers_orig->HasAllocation(*orig_value)) {
auto& value_proto = dataflow_proto.GetUniqueValueAt(
orig_value->instruction(), orig_value->index());
EXPECT_TRUE(buffers_from_proto->HasAllocation(value_proto));
EXPECT_EQ(orig_value->color(), value_proto.color());
EXPECT_EQ(buffers_orig->GetAssignedAllocation(*orig_value).index(),
buffers_from_proto->GetAssignedAllocation(value_proto).index());
}
}
}
TEST_F(BufferAssignmentTest, AliasedParamCanBeReused) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p0"));
auto neg_1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
auto neg_2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, neg_1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({}, 0, {}));
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation param_buffer = GetAssignedInputAllocation(*buffers, param);
BufferAllocation neg_1_buffer = GetAllocation(*buffers, neg_1, {});
BufferAllocation neg_2_buffer = GetAllocation(*buffers, neg_2, {});
EXPECT_EQ(param_buffer.index(), neg_1_buffer.index());
EXPECT_EQ(neg_2_buffer.index(), neg_1_buffer.index());
}
TEST_F(BufferAssignmentTest, AddCannotReuse) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignmentNoBuffersReuseForAdd(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& sub_buffer = GetTopLevelAllocation(*buffers, sub);
EXPECT_NE(sub_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), sub_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, BasicUniquelyColored) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
absl::flat_hash_map<const HloInstruction*, int> color_map;
auto colorer = [&](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
int color = 0;
for (HloValue::Id id = 0;
id < alias_analysis->dataflow_analysis().values().size(); id++) {
auto& value = alias_analysis->dataflow_analysis().GetValue(id);
color_map[value.defining_instruction()] = color;
value.set_color(BufferValue::Color(color++));
}
return absl::OkStatus();
};
auto buffers = RunColoredBufferAssignment(module.get(), colorer);
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
EXPECT_EQ(param0->shape().layout().memory_space(), color_map[param0]);
EXPECT_EQ(param1->shape().layout().memory_space(), color_map[param1]);
EXPECT_EQ(mul->shape().layout().memory_space(), color_map[mul]);
EXPECT_EQ(add->shape().layout().memory_space(), color_map[add]);
EXPECT_EQ(sub->shape().layout().memory_space(), color_map[sub]);
}
TEST_F(BufferAssignmentTest, BasicPartiallyColored) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
for (HloValue::Id id = 0;
id < alias_analysis->dataflow_analysis().values().size(); id++) {
auto& value = alias_analysis->dataflow_analysis().GetValue(id);
auto& buffer = alias_analysis->GetBufferContainingValue(value);
for (const auto& alias : buffer.values()) {
if (alias->instruction()->opcode() == HloOpcode::kAdd ||
alias->instruction()->opcode() == HloOpcode::kMultiply) {
value.set_color(LogicalBuffer::Color(1));
}
}
if (!value.has_color()) {
value.set_color(LogicalBuffer::Color(0));
}
}
return absl::OkStatus();
};
auto buffers = RunColoredBufferAssignment(module.get(), colorer);
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(add_buffer.index(), mul_buffer.index());
GetAssignedOutputAllocation(*buffers, sub);
EXPECT_EQ(mul->shape().layout().memory_space(), 1);
EXPECT_EQ(add->shape().layout().memory_space(), 1);
EXPECT_EQ(sub->shape().layout().memory_space(), 0);
EXPECT_EQ(param0->shape().layout().memory_space(), 0);
EXPECT_EQ(param1->shape().layout().memory_space(), 0);
}
TEST_F(BufferAssignmentTest, PresetAssignments) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
Shape f32vec100_color1 = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {100}, {0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      /*memory_space=*/1);
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_color1, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_color1, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto preset_assignments = std::make_unique<PresetAssignments>();
preset_assignments->add_chunk({mul, {}},
HeapSimulator::Chunk::FromOffsetSize(100, 400));
preset_assignments->add_chunk({add, {}},
HeapSimulator::Chunk::FromOffsetSize(550, 400));
  preset_assignments->assignment_information_for_space(/*memory_space=*/1)
      ->size = 950;
auto buffers = RunBufferAssignmentWithPresetAssignments(
module.get(), std::move(preset_assignments));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
EXPECT_EQ(paramscalar_buffer.color(), LogicalBuffer::Color(0));
EXPECT_NE(param0_buffer.index(), param1_buffer.index());
EXPECT_EQ(param0_buffer.color(), LogicalBuffer::Color(0));
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_EQ(mul_buffer, add_buffer);
EXPECT_NE(mul_buffer.index(), param0_buffer.index());
EXPECT_EQ(mul_buffer.color(), LogicalBuffer::Color(1));
EXPECT_EQ(mul_buffer.assigned_buffers().size(), 2);
for (const auto& value_and_offsetsize : mul_buffer.assigned_buffers()) {
if (value_and_offsetsize.first->instruction() == mul) {
EXPECT_EQ(value_and_offsetsize.second.offset, 100);
EXPECT_EQ(value_and_offsetsize.second.size, 400);
} else {
EXPECT_EQ(value_and_offsetsize.first->instruction(), add);
EXPECT_EQ(value_and_offsetsize.second.offset, 550);
EXPECT_EQ(value_and_offsetsize.second.size, 400);
}
}
GetAssignedOutputAllocation(*buffers, sub);
}
TEST_F(BufferAssignmentTest, PresetAssignmentsWhile) {
auto module = CreateNewVerifiedModule();
Shape f32vec10_color1 = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {10}, {0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      /*memory_space=*/1);
Shape t_s32_f32v10_color1 =
ShapeUtil::MakeTupleShape({s32_, f32vec10_color1});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32_, cond_param, 0));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(50)));
cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32_, body_param, 0));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec10_color1, body_param, 1));
HloInstruction* body_data_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f})));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
f32vec10_color1, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
s32_, HloOpcode::kAdd, body_iter, body_iter_increment));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_iter_next, body_data_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(0, s32_, "param_iter"));
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec10_, "param_data"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec10_color1, HloOpcode::kNegate, data));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({iter, negate}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v10_color1, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec10_color1, while_op, 1));
builder.AddInstruction(HloInstruction::CreateBinary(
f32vec10_, HloOpcode::kAdd, while_data, data));
module->AddEntryComputation(builder.Build());
auto preset_assignments = std::make_unique<PresetAssignments>();
preset_assignments->add_chunk({negate, {}},
HeapSimulator::Chunk::FromOffsetSize(100, 40));
preset_assignments->assignment_information_for_space(/*memory_space=*/1)
->size = 140;
auto buffers_orig = RunBufferAssignmentWithPresetAssignments(
module.get(), std::move(preset_assignments));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
const BufferAllocation& data_buffer = GetTopLevelAllocation(*buffers, negate);
EXPECT_EQ(data_buffer.assigned_buffers().size(), 5);
for (const auto& value_and_offsetsize : data_buffer.assigned_buffers()) {
EXPECT_EQ(value_and_offsetsize.second.offset, 100);
EXPECT_EQ(value_and_offsetsize.second.size, 40);
EXPECT_EQ(value_and_offsetsize.first->color(), LogicalBuffer::Color(1));
}
}
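// Here sub reads from both add and mul, so mul is still live when add
// executes and the two must get distinct allocations.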
TEST_F(BufferAssignmentTest, MultipleUsersForNode) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kSubtract, add, mul));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffers,
ConvertToProtoAndBack(buffers_orig.get(), module.get()));
BufferAllocation paramscalar_buffer =
GetAssignedInputAllocation(*buffers, paramscalar);
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation param1_index = GetAssignedInputAllocation(*buffers, param1);
EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
EXPECT_NE(paramscalar_buffer.index(), param1_index.index());
EXPECT_NE(param0_buffer.index(), param1_index.index());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
EXPECT_NE(add_buffer.index(), mul_buffer.index());
const std::vector<const HloInstruction*> level0 = GetInstructions(sub);
int64_t size0 = ValidateBuffers(level0, *buffers);
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() << " instructions; "
<< "total buffer size " << size0;
}
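// Maps a scalar +1 computation over a 100x10 array. The map output must get
// its own allocation, and no buffers may be shared between the entry
// computation and the embedded map computation.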
TEST_F(BufferAssignmentTest, TrivialMap) {
auto module = CreateNewVerifiedModule();
auto map_computation =
module->AddEmbeddedComputation(BuildMapComputationPlus1("f32+1"));
auto inner_last = map_computation->root_instruction();
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10_, {param0}, map_computation));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> level0 = GetInstructions(map);
EXPECT_EQ(2, level0.size()) << "Invalid main kernel size";
const std::vector<const HloInstruction*> level1 = GetInstructions(inner_last);
EXPECT_EQ(3, level1.size()) << "Invalid nested add+1 size";
auto buffers = RunBufferAssignment(module.get());
int64_t size0 = ValidateBuffers(level0, *buffers);
int64_t size1 = ValidateBuffers(level1, *buffers);
EXPECT_TRUE(BuffersDistinct(level0, level1, *buffers))
<< "Reuse between main kernel and embedded mapping.";
BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
BufferAllocation map_buffer = GetAssignedOutputAllocation(*buffers, map);
EXPECT_NE(param0_buffer.index(), map_buffer.index());
EXPECT_EQ(HloOpcode::kAdd, inner_last->opcode());
const BufferAllocation& inner_add_buffer =
GetTopLevelAllocation(*buffers, inner_last);
EXPECT_NE(inner_add_buffer.index(), map_buffer.index());
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() + level1.size() << " instructions; "
<< "total buffer size " << size0 + size1;
}
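// The chain of elementwise exps may share a buffer, but reduce must not
// write its output into its input buffer.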
TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {
auto module = CreateNewVerifiedModule();
auto reduce_computation =
module->AddEmbeddedComputation(BuildReduceComputation("f32+f32"));
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, param0));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
/*shape=*/f32vec10_, /*operand=*/exp2, /*init_value=*/const0,
/*dimensions_to_reduce=*/{0}, reduce_computation));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec10_, HloOpcode::kExp, reduce));
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
const std::vector<const HloInstruction*> instrs = GetInstructions(exp3);
ValidateBuffers(instrs, *buffers);
const BufferAllocation& exp1_buffer = GetTopLevelAllocation(*buffers, exp1);
const BufferAllocation& exp2_buffer = GetTopLevelAllocation(*buffers, exp2);
const BufferAllocation& reduce_buffer =
GetTopLevelAllocation(*buffers, reduce);
EXPECT_EQ(exp1_buffer.index(), exp2_buffer.index());
EXPECT_NE(exp2_buffer.index(), reduce_buffer.index());
}
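// Runs a while loop with a condition and body and expects buffer reuse
// between the entry computation and both embedded computations; the while
// result must alias the body root at every shape index.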
TEST_F(BufferAssignmentTest, ExampleWhile) {
auto module = CreateNewVerifiedModule();
auto condition_computation =
module->AddEmbeddedComputation(BuildWhileConditionComputation("if<4"));
auto body_computation =
module->AddEmbeddedComputation(BuildWhileBodyComputation("add-update"));
auto builder = HloComputation::Builder(TestName());
auto const3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({const3, const4}));
auto while_op = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v4_, condition_computation, body_computation, tuple));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> level0 = GetInstructions(while_op);
EXPECT_EQ(4, level0.size()) << "Invalid while kernel size";
const std::vector<const HloInstruction*> levelc =
GetInstructions(condition_computation->root_instruction());
EXPECT_EQ(4, levelc.size()) << "Invalid nested condition size";
const std::vector<const HloInstruction*> levelb =
GetInstructions(body_computation->root_instruction());
EXPECT_EQ(8, levelb.size()) << "Invalid nested body size";
auto buffers = RunBufferAssignment(module.get());
int64_t size0 = ValidateBuffers(level0, *buffers);
int64_t sizec = ValidateBuffers(levelc, *buffers);
int64_t sizeb = ValidateBuffers(levelb, *buffers);
EXPECT_FALSE(BuffersDistinct(level0, levelc, *buffers))
<< "Should be reuse between main kernel and embedded condition.";
EXPECT_FALSE(BuffersDistinct(levelb, levelc, *buffers))
<< "Should be reuse between embedded condition and body.";
EXPECT_FALSE(BuffersDistinct(level0, levelb, *buffers))
<< "Should be reuse between main kernel and embedded body.";
HloInstruction* body_root = body_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, body_root->opcode());
ShapeUtil::ForEachSubshape(
while_op->shape(),
[this, &buffers, while_op, body_root](const Shape& /*subshape*/,
const ShapeIndex& index) {
auto while_op_allocation = GetAllocation(*buffers, while_op, index);
auto body_root_allocation = GetAllocation(*buffers, body_root, index);
EXPECT_EQ(while_op_allocation.index(), body_root_allocation.index());
});
LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
<< " for " << level0.size() + levelc.size() + levelb.size()
<< " instructions; total buffer size " << size0 + sizec + sizeb;
}
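// A conditional with ceil/floor branches: buffers are expected to be reused
// between the conditional and both branch computations, and the branch roots
// must be allocated the same number of bytes as the conditional output.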
TEST_F(BufferAssignmentTest, ExampleConditional) {
auto module = CreateNewVerifiedModule();
auto true_computation = module->AddEmbeddedComputation(
BuildR0F32UnaryOpComputation(HloOpcode::kCeil, "Ceil"));
auto false_computation = module->AddEmbeddedComputation(
BuildR0F32UnaryOpComputation(HloOpcode::kFloor, "Floor"));
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
auto const2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.4f)));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
r0f32_, pred, const1, true_computation, const2, false_computation));
module->AddEntryComputation(builder.Build());
const std::vector<const HloInstruction*> conditional_instrs =
GetInstructions(conditional);
const std::vector<const HloInstruction*> true_instrs =
GetInstructions(true_computation->root_instruction());
const std::vector<const HloInstruction*> false_instrs =
GetInstructions(false_computation->root_instruction());
EXPECT_EQ(4, conditional_instrs.size());
EXPECT_EQ(2, true_instrs.size());
EXPECT_EQ(2, false_instrs.size());
auto buffers = RunBufferAssignment(module.get());
ValidateBuffers(conditional_instrs, *buffers);
ValidateBuffers(true_instrs, *buffers);
ValidateBuffers(false_instrs, *buffers);
EXPECT_FALSE(BuffersDistinct(conditional_instrs, true_instrs, *buffers))
<< "Should be reuse between conditional and true computation.";
EXPECT_FALSE(BuffersDistinct(conditional_instrs, false_instrs, *buffers))
<< "Should be reuse between conditional and false computation.";
EXPECT_FALSE(BuffersDistinct(true_instrs, false_instrs, *buffers))
<< "Should be reuse between true and false computations.";
const BufferAllocation& conditional_buffer =
GetTopLevelAllocation(*buffers, conditional);
const BufferAllocation& true_buffer =
GetTopLevelAllocation(*buffers, true_computation->root_instruction());
const BufferAllocation& false_buffer =
GetTopLevelAllocation(*buffers, false_computation->root_instruction());
EXPECT_EQ(conditional_buffer.size(), true_buffer.size());
EXPECT_EQ(conditional_buffer.size(), false_buffer.size());
}
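// A chain of elementwise unary ops can all be computed in place, sharing a
// single allocation.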
TEST_F(BufferAssignmentTest, UnaryOpReuseChain) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, param0));
auto tanh = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kTanh, exp1));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, tanh));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, exp2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(exp1));
auto& buffer_for_exp1 = GetTopLevelAllocation(*assignment, exp1);
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, tanh));
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, exp2));
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, neg));
}
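// The broadcast output may land in the allocation of negate, which is dead
// by then, even though negate is not an operand of the broadcast; the live
// slice must not share it.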
TEST_F(BufferAssignmentTest, ReuseNonOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}
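// Same as above, but negate now lives out through the root tuple, so its
// buffer must not be reused by the broadcast.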
TEST_F(BufferAssignmentTest, NoReuseLiveBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({negate, broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
GetTopLevelAllocation(*assignment, slice));
}
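// Negate is aliased through a tuple that lives out, so its buffer must not
// be reused either.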
TEST_F(BufferAssignmentTest, NoReuseAliasedBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({negate}));
auto tuple_element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32vec100_, tuple, 0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, tuple_element, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({tuple, broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
GetTopLevelAllocation(*assignment, slice));
}
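// The broadcast output (f32[10,4]) is much smaller than negate's f32[100]
// buffer, so reusing that buffer would strand memory; expect distinct
// allocations.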
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBuffer) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
}
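// With an exactly matching byte size (f32[10,10] vs. f32[100]), the
// broadcast may reuse negate's buffer.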
TEST_F(BufferAssignmentTest, ReuseOutputBufferIfExactlySized) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 10}), slice, {0}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBufferInTuple) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
auto slice = builder.AddInstruction(
HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));
builder.AddInstruction(HloInstruction::CreateTuple({broadcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, negate));
EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
GetTopLevelAllocation(*assignment, slice));
}
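// Verifies the allocation flags of buffers in embedded computations: buffers
// inside a mapped computation are thread-local, while buffers of a called
// computation are not, and the call parameter aliases the entry parameter.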
TEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) {
auto module = CreateNewVerifiedModule();
auto vec_shape = ShapeUtil::MakeShape(F32, {42});
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto map_builder = HloComputation::Builder(TestName() + "_map");
auto map_param = map_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "map_param"));
auto map_root = map_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto call_builder = HloComputation::Builder(TestName() + "_call");
auto call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec_shape, "vec_param"));
auto call_root = call_builder.AddInstruction(
HloInstruction::CreateUnary(vec_shape, HloOpcode::kExp, call_param));
auto call_computation = module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec_shape, "param"));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(vec_shape, {param}, call_computation));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(vec_shape, {call}, map_computation));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);
EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_param_alloc.maybe_live_out());
EXPECT_TRUE(map_param_alloc.is_thread_local());
auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);
EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_root_alloc.maybe_live_out());
EXPECT_TRUE(map_root_alloc.is_thread_local());
auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);
EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_param_alloc.maybe_live_out());
EXPECT_FALSE(call_param_alloc.is_thread_local());
auto& call_root_alloc = GetTopLevelAllocation(*assignment, call_root);
EXPECT_FALSE(call_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_root_alloc.is_thread_local());
auto& param_alloc = GetTopLevelAllocation(*assignment, param);
EXPECT_TRUE(param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(param_alloc.maybe_live_out());
EXPECT_FALSE(param_alloc.is_thread_local());
auto& map_alloc = GetTopLevelAllocation(*assignment, map);
EXPECT_FALSE(map_alloc.is_entry_computation_parameter());
EXPECT_TRUE(map_alloc.maybe_live_out());
EXPECT_FALSE(map_alloc.is_thread_local());
}
TEST_F(BufferAssignmentTest, CustomCallEmbeddedComputationBuffers) {
auto module = CreateNewVerifiedModule();
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto map_builder = HloComputation::Builder(TestName() + "_map");
auto map_param = map_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "map_param"));
auto map_root = map_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
builder.AddInstruction(HloInstruction::CreateCustomCall(
scalar_shape, {param}, map_computation, "call_name"));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);
EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_param_alloc.maybe_live_out());
EXPECT_TRUE(map_param_alloc.is_thread_local());
auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);
EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());
EXPECT_FALSE(map_root_alloc.maybe_live_out());
EXPECT_TRUE(map_root_alloc.is_thread_local());
}
TEST_F(BufferAssignmentTest, TupleParameterAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(
/*parameter_number=*/0,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(S32, {42})}),
"param0"));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(4, assignment->Allocations().size());
ShapeUtil::ForEachSubshape(
tuple_param->shape(),
[this, &assignment, tuple_param](const Shape& /*subshape*/,
const ShapeIndex& index) {
auto allocation = GetAllocation(*assignment, tuple_param, index);
EXPECT_TRUE(allocation.is_entry_computation_parameter());
EXPECT_EQ(0, allocation.parameter_number());
EXPECT_TRUE(allocation.maybe_live_out());
});
}
TEST_F(BufferAssignmentTest, ElementOfNestedTupleParameterAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(
/*parameter_number=*/0,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {42}),
ShapeUtil::MakeShape(S32, {101})})}),
"param0"));
auto tuple_element =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(tuple_param->shape(), {1}), tuple_param, 1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_FALSE(
GetAllocation(*assignment, tuple_param, /*index=*/{}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, tuple_param, /*index=*/{1}).maybe_live_out());
EXPECT_TRUE(GetAllocation(*assignment, tuple_param, /*index=*/{1, 0})
.maybe_live_out());
EXPECT_TRUE(GetAllocation(*assignment, tuple_param, /*index=*/{1, 1})
.maybe_live_out());
EXPECT_TRUE(
GetTopLevelAllocation(*assignment, tuple_element).maybe_live_out());
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 0}),
GetAllocation(*assignment, tuple_element, {0}));
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 1}),
GetAllocation(*assignment, tuple_element, {1}));
EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1}),
GetTopLevelAllocation(*assignment, tuple_element));
}
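// A tuple constant as the entry root needs allocations for the tuple table
// and each element, three in total.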
TEST_F(BufferAssignmentTest, TupleConstantAsOutput) {
auto builder = HloComputation::Builder(TestName());
Literal elements[] = {LiteralUtil::CreateR0<int64_t>(0),
LiteralUtil::CreateR0<int64_t>(1)};
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::MakeTuple({&elements[0], &elements[1]})));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(3, assignment->Allocations().size());
}
TEST_F(BufferAssignmentTest, TupleCustomCallAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto custom_call = builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),
ShapeUtil::MakeShape(S32, {101})}),
{}, "foo_function"));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(3, assignment->Allocations().size());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, /*index=*/{}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, /*index=*/{0}).maybe_live_out());
EXPECT_TRUE(
GetAllocation(*assignment, custom_call, /*index=*/{1}).maybe_live_out());
}
TEST_F(BufferAssignmentTest, CustomCallAliasedBuffer) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add = f32[2,3,4,5] add(parameter.1, parameter.2)
ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnUnverifiedModule(kModuleString));
std::unique_ptr<BufferAssignment> assignment =
RunBufferAssignment(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_TRUE(
assignment->SharesTopLevelSlice(custom_call, custom_call->operand(0)));
}
TEST_F(BufferAssignmentTest, TupleCallAsOutput) {
auto module = CreateNewVerifiedModule();
auto elem_shape = f32vec4_;
auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});
auto sub_builder = HloComputation::Builder(TestName() + "_sub");
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "sub_param"));
auto sub_tuple =
sub_builder.AddInstruction(HloInstruction::CreateTuple({sub_param}));
auto sub_computation = module->AddEmbeddedComputation(sub_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "param"));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {param}, sub_computation));
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(2, assignment->Allocations().size());
EXPECT_EQ(GetAllocation(*assignment, call, {}),
GetAllocation(*assignment, sub_tuple, {}));
EXPECT_EQ(GetAllocation(*assignment, call, {0}),
GetAllocation(*assignment, sub_param, {}));
EXPECT_NE(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_tuple));
EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_param));
}
TEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) {
auto module = CreateNewVerifiedModule();
auto elem_shape = f32vec4_;
auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});
auto d_builder = HloComputation::Builder(TestName() + "_d");
auto d_param = d_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "d_param"));
auto d_computation = d_builder.Build();
auto c_builder = HloComputation::Builder(TestName() + "_c");
auto c_param = c_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "c_param"));
auto c_call = c_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {c_param}, d_computation.get()));
auto c_computation = c_builder.Build();
auto b_builder = HloComputation::Builder(TestName() + "_b");
auto b_param = b_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "b_param"));
auto b_call = b_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {b_param}, c_computation.get()));
auto b_computation = b_builder.Build();
auto a_builder = HloComputation::Builder(TestName());
auto a_param = a_builder.AddInstruction(
HloInstruction::CreateParameter(0, elem_shape, "param"));
auto a_tuple =
a_builder.AddInstruction(HloInstruction::CreateTuple({a_param}));
auto a_call = a_builder.AddInstruction(
HloInstruction::CreateCall(tuple_shape, {a_tuple}, b_computation.get()));
auto a_computation = a_builder.Build();
module->AddEmbeddedComputation(std::move(d_computation));
module->AddEmbeddedComputation(std::move(c_computation));
module->AddEntryComputation(std::move(a_computation));
module->AddEmbeddedComputation(std::move(b_computation));
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(GetAllocation(*assignment, a_call, {}),
GetAllocation(*assignment, b_call, {}));
EXPECT_EQ(GetAllocation(*assignment, b_call, {}),
GetAllocation(*assignment, c_call, {}));
EXPECT_EQ(GetAllocation(*assignment, c_call, {}),
GetAllocation(*assignment, d_param, {}));
EXPECT_EQ(GetAllocation(*assignment, a_call, {0}),
GetAllocation(*assignment, b_call, {0}));
EXPECT_EQ(GetAllocation(*assignment, b_call, {0}),
GetAllocation(*assignment, c_call, {0}));
EXPECT_EQ(GetAllocation(*assignment, c_call, {0}),
GetAllocation(*assignment, d_param, {0}));
EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));
EXPECT_EQ(GetAllocation(*assignment, b_param, {0}),
GetAllocation(*assignment, c_param, {0}));
EXPECT_EQ(GetAllocation(*assignment, c_param, {0}),
GetAllocation(*assignment, d_param, {0}));
}
TEST_F(BufferAssignmentTest, BitcastAsOutput) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {42}), "param"));
auto bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(param->shape(), param));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(1, assignment->Allocations().size());
EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, bitcast));
}
TEST_F(BufferAssignmentTest, TupleBufferNotReused) {
auto builder = HloComputation::Builder(TestName());
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param0"));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({param}));
auto tuple_element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, tuple, 0));
auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kCopy, tuple_element));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment_orig = RunBufferAssignment(module.get());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> assignment,
ConvertToProtoAndBack(assignment_orig.get(), module.get()));
EXPECT_EQ(3, assignment->Allocations().size());
EXPECT_NE(GetTopLevelAllocation(*assignment, tuple),
GetTopLevelAllocation(*assignment, copy));
}
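// Two dot temporaries feeding a concatenate should be packed into a single
// combined temp allocation, with offsets respecting the requested alignment.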
TEST_F(BufferAssignmentTest, OneTempAllocation) {
auto builder = HloComputation::Builder(TestName());
Shape shape_2x3 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape_2x4 = ShapeUtil::MakeShape(F32, {2, 4});
Shape shape_3x4 = ShapeUtil::MakeShape(F32, {3, 4});
Shape shape_4x4 = ShapeUtil::MakeShape(F32, {4, 4});
Shape shape_5x4 = ShapeUtil::MakeShape(F32, {5, 4});
auto param_a = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape_2x3, "param_a"));
auto param_b = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_3x4, "param_b"));
auto param_c = builder.AddInstruction(
HloInstruction::CreateParameter(2, shape_4x4, "param_c"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
auto dot_ab = builder.AddInstruction(HloInstruction::CreateDot(
shape_2x4, param_a, param_b, dot_dnums, precision_config));
auto dot_bc = builder.AddInstruction(HloInstruction::CreateDot(
shape_3x4, param_b, param_c, dot_dnums, precision_config));
builder.AddInstruction(
HloInstruction::CreateConcatenate(shape_5x4, {dot_ab, dot_bc}, 0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto assignment = RunBufferAssignment(module.get(), /*alignment=*/1);
EXPECT_EQ(5, assignment->Allocations().size());
BufferAllocation::Slice slice_ab =
assignment->GetUniqueTopLevelSlice(dot_ab).value();
BufferAllocation::Slice slice_bc =
assignment->GetUniqueTopLevelSlice(dot_bc).value();
EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());
EXPECT_NE(slice_ab, slice_bc);
EXPECT_EQ(32, slice_ab.size());
EXPECT_EQ(48, slice_bc.size());
EXPECT_EQ(80, slice_ab.allocation()->size());
EXPECT_EQ(80, slice_bc.allocation()->size());
assignment = RunBufferAssignment(module.get(), /*alignment=*/64);
EXPECT_EQ(5, assignment->Allocations().size());
slice_ab = assignment->GetUniqueTopLevelSlice(dot_ab).value();
slice_bc = assignment->GetUniqueTopLevelSlice(dot_bc).value();
EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());
EXPECT_NE(slice_ab, slice_bc);
EXPECT_EQ(32, slice_ab.size());
EXPECT_EQ(48, slice_bc.size());
if (slice_ab.offset() == 0) {
EXPECT_EQ(64, slice_bc.offset());
EXPECT_EQ(64 + 48, slice_ab.allocation()->size());
EXPECT_EQ(64 + 48, slice_bc.allocation()->size());
} else {
EXPECT_EQ(64, slice_ab.offset());
EXPECT_EQ(0, slice_bc.offset());
EXPECT_EQ(64 + 32, slice_ab.allocation()->size());
EXPECT_EQ(64 + 32, slice_bc.allocation()->size());
}
}
TEST_F(BufferAssignmentTest, TrivialPeakBuffers) {
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, broadcast, param0));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kSubtract, add, param1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignment(module.get());
const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
const std::vector<const HloValue*>& peak_buffers =
mul_buffer.PeakMemoryLogicalBuffers();
ASSERT_EQ(peak_buffers.size(), 1);
EXPECT_EQ(peak_buffers[0]->instruction(), sub);
}
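// With an explicit instruction sequence, the peak of the combined temp
// allocation is at the concatenate, where rev, neg, and concat are all
// simultaneously live.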
TEST_F(BufferAssignmentTest, PeakBuffers) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kLog, param));
auto rev = builder.AddInstruction(
HloInstruction::CreateReverse(f32vec100_, log, {0}));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
const Shape concat_shape = ShapeUtil::MakeShape(F32, {200});
auto concat = builder.AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, {rev, neg}, 0));
auto root = builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1}), concat, {0}, {1}, {1}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto buffers = RunBufferAssignmentWithInstructionSequence(
module.get(), {param, log, rev, neg, concat, root});
const BufferAllocation& buffer = GetTopLevelAllocation(*buffers, concat);
EXPECT_FALSE(buffer.IsInputOrOutput());
EXPECT_TRUE(buffer.IsPreallocatedTempBuffer());
ASSERT_EQ(buffer.assigned_buffers().size(), 4);
const std::vector<const HloValue*>& peak_buffers =
buffer.PeakMemoryLogicalBuffers();
ASSERT_EQ(peak_buffers.size(), 3);
std::vector<const HloInstruction*> peak_instructions;
for (const HloValue* logical_buffer : peak_buffers) {
peak_instructions.push_back(logical_buffer->instruction());
}
EXPECT_THAT(peak_instructions, UnorderedElementsAre(rev, neg, concat));
}
TEST_F(BufferAssignmentTest, AliasedBuffersShouldntCoexistInPeakBuffers) {
std::string hlo_text = R"(
HloModule test_module, is_scheduled=true
cond {
param = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
body {
param.0 = (s32[], s32[]) parameter(0)
gte = s32[] get-tuple-element(param.0), index=0
add = s32[] add(gte, gte)
ROOT tuple = (s32[], s32[]) tuple(add, add)
}
ENTRY test_module {
param.3 = s32[] parameter(0)
copy = s32[] copy(param.3)
tuple = (s32[], s32[]) tuple(copy, copy)
while = (s32[], s32[]) while(tuple), condition=cond, body=body
gte = s32[] get-tuple-element(while), index=0
ROOT negate = s32[] negate(gte)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
auto assignment = RunBufferAssignmentWithSequentialOrdering(module.get());
const BufferAllocation& buffer =
GetTopLevelAllocation(*assignment, FindInstruction(module.get(), "copy"));
const std::vector<const HloValue*>& peak_buffers =
buffer.PeakMemoryLogicalBuffers();
int num_peak_buffers = 0;
for (const HloValue* peak_buffer : peak_buffers) {
if (peak_buffer->shape().IsArray()) {
++num_peak_buffers;
}
}
EXPECT_EQ(num_peak_buffers, 1);
}
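// Dynamic-update-slice updates the loop state in place, so both updates
// must share the allocation of the tuple element they update.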
TEST_F(BufferAssignmentTest, InPlaceBuffer) {
const char* hlo_text = R"(
HloModule Module
ENTRY main {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
HloInstruction* parameter =
m->entry_computation()->GetInstructionWithName("get-tuple-element.4");
HloInstruction* dus1 =
m->entry_computation()->GetInstructionWithName("dynamic-update-slice.5");
HloInstruction* dus2 =
m->entry_computation()->GetInstructionWithName("dynamic-update-slice.9");
auto buffers = RunBufferAssignment(m.get());
{
const BufferAllocation& parameter_alloc =
GetTopLevelAllocation(*buffers, parameter);
const BufferAllocation& dus1_alloc = GetTopLevelAllocation(*buffers, dus1);
EXPECT_EQ(parameter_alloc, dus1_alloc);
const BufferAllocation& dus2_alloc = GetTopLevelAllocation(*buffers, dus2);
EXPECT_EQ(parameter_alloc, dus2_alloc);
}
}
TEST_F(BufferAssignmentTest, ConstantBuffersAreNotReused) {
const char* hlo_text = R"(
HloModule Module
True {
ROOT x.0.1 = f32[] parameter(0)
}
False {
x.0.0 = f32[] parameter(0)
ROOT copy.1 = f32[] copy(x.0.0)
}
ENTRY main {
pred.1.0 = pred[] parameter(0)
constant.1.1 = f32[] constant(56)
copy.2 = f32[] copy(constant.1.1)
constant.1.2 = f32[] constant(12)
ROOT conditional.1.3 = f32[] conditional(pred.1.0, copy.2, constant.1.2),
true_computation=True, false_computation=False
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
HloInstruction* constant_1 =
m->entry_computation()->GetInstructionWithName("constant.1.1");
HloInstruction* constant_2 =
m->entry_computation()->GetInstructionWithName("constant.1.2");
auto buffers = RunBufferAssignment(m.get());
{
const BufferAllocation& allocation_for_const_1 =
GetTopLevelAllocation(*buffers, constant_1);
EXPECT_TRUE(allocation_for_const_1.is_constant());
for (const auto& buffer_offset_pair :
allocation_for_const_1.assigned_buffers()) {
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kCopy);
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kConditional);
}
}
{
const BufferAllocation& allocation_for_const_2 =
GetTopLevelAllocation(*buffers, constant_2);
EXPECT_TRUE(allocation_for_const_2.is_constant());
for (const auto& buffer_offset_pair :
allocation_for_const_2.assigned_buffers()) {
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kCopy);
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
HloOpcode::kConditional);
}
}
}
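// Fixture for buffer assignment tests involving while loops. The loop state
// is an (input, weights, output) tuple of f32[4] arrays.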
class WhileBufferAssignmentTest : public HloTestBase {
protected:
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
auto ten = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(10)));
builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), zero, ten, ComparisonDirection::kLt));
return builder.Build();
}
std::unique_ptr<HloComputation> BuildWhileBodyComputation(
const std::string& name) {
auto builder = HloComputation::Builder(name);
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto input = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 0));
auto weights = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
auto output = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kMultiply, input, weights));
builder.AddInstruction(
HloInstruction::CreateTuple({input, weights, output}));
return builder.Build();
}
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64_t alignment = 1) {
HloSchedule schedule = ScheduleModule(module, ByteSizeOf).value();
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf,
[alignment](LogicalBuffer::Color) { return alignment; },
/*allocate_buffers_for_constants=*/true)
.value();
}
static int64_t ByteSizeOf(const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/sizeof(void*));
}
Shape data_shape_ = ShapeUtil::MakeShape(F32, {4});
Shape loop_state_shape_ =
ShapeUtil::MakeTupleShape({data_shape_, data_shape_, data_shape_});
};
static void RunCopyInsertion(HloModule* module) {
CopyInsertion copy_insertion;
EXPECT_IS_OK(copy_insertion.Run(module).status());
}
TEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto weights1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, data_shape_, "weights1"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto input1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({input1, weights1, output1}));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(assignment->GetUniqueSlice(input0, {}).value(),
assignment->GetUniqueSlice(while0, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(weights0, {}).value(),
assignment->GetUniqueSlice(while0, {1}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),
assignment->GetUniqueSlice(while1, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(weights1, {}).value(),
assignment->GetUniqueSlice(while1, {1}).value());
}
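// A while loop initialized directly from an entry parameter must share the
// parameter's slice; the second, independent while gets its own.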
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithEntryParameter) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
const char* module_str = R"(
HloModule test_module
%cond.v0 {
%param = s32[] parameter(0)
ROOT %constant = pred[] constant(true)
}
%cond.v1 {
%param.0 = s32[] parameter(0)
ROOT %constant.0 = pred[] constant(true)
}
%body.v0 {
ROOT %param.1 = s32[] parameter(0)
}
%body.v1 {
%param.2 = s32[] parameter(0)
ROOT add = s32[] add(%param.2, %param.2)
}
ENTRY %test_module {
%param.3 = s32[] parameter(0)
%while.0 = s32[] while(%param.3), condition=%cond.v0, body=%body.v0
%mul = s32[] multiply(%while.0, %while.0)
%while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
int64_t instruction_count = m->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(m.get()).status());
ASSERT_EQ(instruction_count, m->instruction_count());
const HloInstruction* bcast = m->entry_computation()->root_instruction();
const HloInstruction* param =
m->entry_computation()->parameter_instruction(0);
ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
const HloInstruction* while1 = bcast->operand(0);
ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);
const HloInstruction* while0 = while1->operand(0)->operand(0);
ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);
auto assignment = RunBufferAssignment(m.get());
TF_ASSERT_OK_AND_ASSIGN(auto slice_param,
assignment->GetUniqueSlice(param, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice_param, slice_while0);
EXPECT_NE(slice_param, slice_while1);
}
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithConstant) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
const char* module_str = R"(
HloModule test_module
%cond.v0 {
%param = s32[] parameter(0)
ROOT %constant = pred[] constant(true)
}
%cond.v1 {
%param.0 = s32[] parameter(0)
ROOT %constant.0 = pred[] constant(true)
}
%body.v0 {
ROOT %param.1 = s32[] parameter(0)
}
%body.v1 {
%param.2 = s32[] parameter(0)
ROOT add = s32[] add(%param.2, %param.2)
}
ENTRY %test_module {
%constant.42 = s32[] constant(42)
%while.0 = s32[] while(%constant.42), condition=%cond.v0, body=%body.v0
%mul = s32[] multiply(%while.0, %while.0)
%while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
int64_t instruction_count = m->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(m.get()).status());
ASSERT_EQ(instruction_count, m->instruction_count());
const HloInstruction* bcast = m->entry_computation()->root_instruction();
const HloInstruction* constant =
m->entry_computation()->GetInstructionWithName("constant.42");
ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
const HloInstruction* while1 = bcast->operand(0);
ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);
const HloInstruction* while0 = while1->operand(0)->operand(0);
ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);
auto assignment = RunBufferAssignment(m.get());
TF_ASSERT_OK_AND_ASSIGN(auto slice_constant,
assignment->GetUniqueSlice(constant, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice_constant, slice_while0);
EXPECT_NE(slice_constant, slice_while1);
}
TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
const Shape r0s32 = ShapeUtil::MakeShape(S32, {});
auto build_cond = [&]() {
auto builder = HloComputation::Builder("cond");
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
const4, ComparisonDirection::kLt));
return builder.Build();
};
auto build_body = [&]() {
auto builder = HloComputation::Builder("body");
auto const9 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(9)));
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x"));
builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, param, const9));
return builder.Build();
};
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto infeed =
builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, token, ""));
auto infeed_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r0s32, infeed, 0));
auto cond0 = module->AddEmbeddedComputation(build_cond());
auto body0 = module->AddEmbeddedComputation(build_body());
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data));
auto cond1 = module->AddEmbeddedComputation(build_cond());
auto body1 = module->AddEmbeddedComputation(build_body());
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond1, body1, while0));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, zero, zero));
auto cond2 = module->AddEmbeddedComputation(build_cond());
auto body2 = module->AddEmbeddedComputation(build_body());
auto while2 = builder.AddInstruction(
HloInstruction::CreateWhile(r0s32, cond2, body2, add));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({while2, while1}));
module->AddEntryComputation(builder.Build());
int64_t instruction_count = module->instruction_count();
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
ASSERT_EQ(instruction_count, module->instruction_count());
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
/*pointer_size=*/sizeof(void*));
}));
schedule.set_sequence(
module->entry_computation(),
{token, infeed, infeed_data, while0, while1, zero, add, while2, tuple});
TF_ASSERT_OK(schedule.Verify());
TF_ASSERT_OK_AND_ASSIGN(
auto assignment,
BufferAssigner::Run(
module.get(), std::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
/*allocate_buffers_for_constants=*/true));
TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));
TF_ASSERT_OK_AND_ASSIGN(auto slice1, assignment->GetUniqueSlice(tuple, {1}));
EXPECT_NE(slice0, slice1);
TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,
assignment->GetUniqueSlice(while0, {}));
TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,
assignment->GetUniqueSlice(while1, {}));
EXPECT_EQ(slice1, slice_while0);
EXPECT_EQ(slice1, slice_while1);
TF_ASSERT_OK_AND_ASSIGN(auto slice_while2,
assignment->GetUniqueSlice(while2, {}));
EXPECT_EQ(slice0, slice_while2);
}
TEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, while0));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {0}).value(),
assignment->GetUniqueSlice(while1, {0}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {1}).value(),
assignment->GetUniqueSlice(while1, {1}).value());
EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),
assignment->GetUniqueSlice(while1, {2}).value());
}
TEST_F(BufferAssignmentTest, TwoCalls) {
auto module = CreateNewVerifiedModule();
Shape r0f32 = ShapeUtil::MakeShape(xla::F32, {});
HloComputation* sub_computation;
{
auto builder = HloComputation::Builder(TestName() + "_sub_comp");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "param"));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1));
sub_computation = module->AddEmbeddedComputation(builder.Build(add));
}
auto builder = HloComputation::Builder(TestName());
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto call1 = builder.AddInstruction(
HloInstruction::CreateCall(r0f32, {constant2}, sub_computation));
auto call2 = builder.AddInstruction(
HloInstruction::CreateCall(r0f32, {constant3}, sub_computation));
auto add1 = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call1, constant2));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call2, add1));
module->AddEntryComputation(builder.Build(add2));
{
FlattenCallGraph flatten;
TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
}
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));
}
TEST_F(BufferAssignmentTest, CallParamCoAllocation) {
const char* hlo_text = R"(
HloModule CallParamCoAllocation
Callee {
param0 = (f32[100],(f32[200],f32[300])) parameter(0)
param1 = s32[20] parameter(1)
ROOT constant = f32[] constant(1)
}
ENTRY Main {
entry_param0 = f32[100] parameter(0)
entry_param1 = s32[20] parameter(1)
custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target"
call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)
ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee
}
)";
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(hlo_text, config));
auto buffers = RunBufferAssignment(m.get());
HloComputation* main = m->entry_computation();
HloComputation* callee = m->GetComputationWithName("Callee");
EXPECT_NE(callee, nullptr);
HloInstruction* param0 = callee->parameter_instruction(0);
HloInstruction* param1 = callee->parameter_instruction(1);
HloInstruction* entry_param0 = main->parameter_instruction(0);
HloInstruction* entry_param1 = main->parameter_instruction(1);
HloInstruction* custom_call = main->GetInstructionWithName("custom_call");
EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),
GetAllocation(*buffers, param0, {0}));
EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),
GetAllocation(*buffers, param1, {}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),
GetAllocation(*buffers, param0, {1}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),
GetAllocation(*buffers, param0, {1, 0}));
EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),
GetAllocation(*buffers, param0, {1, 1}));
}
TEST_F(BufferAssignmentTest, AsyncCall) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)
%async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {}));
for (const auto& hlo_name :
{"negate_0", "negate_1", "negate_2", "negate_3"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
}
TEST_F(BufferAssignmentTest, AsyncCallPrivateStack) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}, execution_thread="foobar"
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)
%async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
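// Color buffers with the private-stack color (1) only when every value in
// the buffer is defined and used exclusively on the "foobar" execution
// thread; a nonzero memory space in the defining layout overrides the color.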
for (const HloBuffer& buffer : alias_analysis->buffers()) {
int color = 1;
for (const HloValue* value : buffer.values()) {
if (absl::c_any_of(
value->positions(),
[](const HloPosition& position) {
return position.instruction->parent()->execution_thread() !=
"foobar";
}) ||
absl::c_any_of(value->GetUses(), [](const HloUse& use) {
return use.instruction->parent()->execution_thread() != "foobar";
})) {
color = 0;
}
}
for (const HloValue* value : buffer.values()) {
const HloPosition& defining_position = value->defining_position();
if (defining_position.shape().has_layout()) {
const int memory_space =
defining_position.shape().layout().memory_space();
if (memory_space != 0) {
color = memory_space;
}
}
alias_analysis->dataflow_analysis()
.GetValue(value->id())
.set_color(BufferValue::Color(color));
}
}
return absl::OkStatus();
};
BufferAssigner::PrivateStacks private_stacks;
private_stacks[1] = {FindComputation(m.get(), "called_computation")};
auto buffers = RunBufferAssignmentWithSequentialOrdering(
m.get(), 1, colorer, private_stacks);
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {}));
for (const auto& hlo_name :
{"negate_0", "negate_1", "negate_2", "negate_3"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {}));
}
TEST_F(BufferAssignmentTest, MultipleAsyncCallPrivateStack) {
const char* hlo_text = R"(
HloModule AsyncCall, is_scheduled=true
%called_computation1 {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)
}, execution_thread="foobar"
%called_computation2 {
%param_2 = f32[4096]{0} parameter(0)
%param_3 = f32[4096]{0} parameter(1)
%negate_4 = f32[4096]{0} negate(f32[4096]{0} %param_2)
%negate_5 = f32[4096]{0} negate(f32[4096]{0} %param_3)
ROOT %result.2 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_5)
}, execution_thread="foobar"
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start.1 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation1
%async-start.2 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %b, f32[4096]{0} %a), async_execution_thread="foobar", to_apply=%called_computation2
%negate_6 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_7 = f32[4096]{0} negate(f32[4096]{0} %b)
%negate_8 = f32[4096]{0} negate(f32[4096]{0} %negate_7)
%negate_9 = f32[4096]{0} negate(f32[4096]{0} %negate_8)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_6, f32[4096]{0} %negate_9)
%async-done.1 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.1)
%async-done.2 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.2)
%add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done.1)
ROOT %add_2 = f32[4096]{0} add(f32[4096]{0} %add_1, f32[4096]{0} %async-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
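// Same thread-based private-stack coloring as in the single-computation
// test above.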
for (const HloBuffer& buffer : alias_analysis->buffers()) {
int color = 1;
for (const HloValue* value : buffer.values()) {
if (absl::c_any_of(
value->positions(),
[](const HloPosition& position) {
return position.instruction->parent()->execution_thread() !=
"foobar";
}) ||
absl::c_any_of(value->GetUses(), [](const HloUse& use) {
return use.instruction->parent()->execution_thread() != "foobar";
})) {
color = 0;
}
}
for (const HloValue* value : buffer.values()) {
const HloPosition& defining_position = value->defining_position();
if (defining_position.shape().has_layout()) {
const int memory_space =
defining_position.shape().layout().memory_space();
if (memory_space != 0) {
color = memory_space;
}
}
alias_analysis->dataflow_analysis()
.GetValue(value->id())
.set_color(BufferValue::Color(color));
}
}
return absl::OkStatus();
};
BufferAssigner::PrivateStacks private_stacks;
private_stacks[1] = {FindComputation(m.get(), "called_computation1"),
FindComputation(m.get(), "called_computation2")};
auto buffers = RunBufferAssignmentWithSequentialOrdering(
m.get(), 1, colorer, private_stacks);
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_3", {}), get_slice("a", {}));
EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("param_2", {}), get_slice("b", {}));
EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done.1", {}));
EXPECT_EQ(get_slice("result.2", {}), get_slice("async-done.2", {}));
for (const auto& hlo_name : {"negate_0", "negate_1", "negate_2", "negate_3",
"negate_4", "negate_5"}) {
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_8", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_9", {}));
EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {}));
}
EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {}));
EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {}));
EXPECT_TRUE(get_slice("negate_4", {}) == get_slice("negate_0", {}) ||
get_slice("negate_4", {}) == get_slice("negate_1", {}));
EXPECT_TRUE(get_slice("negate_5", {}) == get_slice("negate_0", {}) ||
get_slice("negate_5", {}) == get_slice("negate_1", {}));
}
TEST_F(BufferAssignmentTest, AsyncCallImplicitSharding) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
called_computation {
param0 = f32[4] parameter(0)
constant = f32[1] constant(1)
dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant)
ROOT negate = f32[4] negate(dynamic-update-slice)
}
ENTRY entry {
p0 = f32[8] parameter(0)
call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread="foo", to_apply=called_computation
ROOT call-done = f32[8] call-done(call-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto buffers = RunBufferAssignmentWithSequentialOrdering(module.get());
LOG(INFO) << buffers->ToString();
auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {
return buffers
->GetUniqueSlice(FindInstruction(module.get(), hlo_name), index)
.value();
};
EXPECT_EQ(get_slice("p0", {}).size(), 32);
EXPECT_EQ(get_slice("dynamic-update-slice", {}).size(), 32);
}
TEST_F(BufferAssignmentTest, AsyncCustomCall) {
const char* hlo_text = R"(
HloModule AsyncCustomCall, is_scheduled=true
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])
custom-call-start(f32[4096]{0} %neg_0),
custom_call_target="Foo"
%async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0");
HloInstruction* async_done = FindInstruction(m.get(), "async-done");
EXPECT_FALSE(buffers->SharesTopLevelSlice(neg_0, async_done));
}
TEST_F(BufferAssignmentTest, AsyncCustomCallWithAliasing) {
const char* hlo_text = R"(
HloModule AsyncCustomCall, is_scheduled=true
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])
custom-call-start(f32[4096]{0} %neg_0),
custom_call_target="Foo",
output_to_operand_aliasing={{}: (0, {})}
%async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));
auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());
HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0");
HloInstruction* async_done = FindInstruction(m.get(), "async-done");
EXPECT_TRUE(buffers->SharesTopLevelSlice(neg_0, async_done));
}
TEST_F(BufferAssignmentTest, BufferIsolation) {
absl::string_view module_str = R"(
HloModule test_module, is_scheduled=true
ENTRY %test_module {
param.0 = s32[1024]{0} parameter(0)
param.1 = s32[1024]{0} parameter(1)
mul1 = s32[1024]{0} multiply(param.0, param.1)
bcast1 = s32[4,1024]{1,0} broadcast(mul1), dimensions={1}
bcast2 = s32[4,1024]{1,0} broadcast(param.0), dimensions={1}
mul2 = s32[1024]{0} multiply(mul1, param.0)
add1 = s32[1024]{0} add(mul1, mul2)
sub2 = s32[1024]{0} subtract(mul1, mul2)
mul3 = s32[1024]{0} multiply(mul2, add1)
mul4 = s32[1024]{0} multiply(mul3, sub2)
bcast3 = s32[4,1024]{1,0} broadcast(mul4), dimensions={1}
add2 = s32[4,1024]{1,0} add(bcast3, bcast2)
ROOT add3 = s32[4,1024]{1,0} add(add2, bcast1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
std::unique_ptr<BufferAssignment> nonisolation_assignment =
RunBufferAssignmentWithIsolationOptions(m.get());
auto nonisolation_allocation =
absl::c_find_if(nonisolation_assignment->Allocations(),
[](const BufferAllocation& allocation) {
return allocation.IsPreallocatedTempBuffer();
});
ASSERT_NE(nonisolation_allocation,
nonisolation_assignment->Allocations().end());
LOG(INFO) << "Non-isolation buffers";
for (const auto& [value, offset_size] :
nonisolation_allocation->assigned_buffers()) {
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
}
BufferAssignment::BufferIsolationOptions isolation_options;
isolation_options.hlo_value_compare =
[](const HloValue* a, const HloValue* b) { return a->id() < b->id(); };
isolation_options.config.add_isolation_colors(0);
isolation_options.config.set_isolation_order_salt(10);
isolation_options.config.set_isolation_fuel(5);
isolation_options.config.set_isolation_padding_bytes(1024);
isolation_options.config.set_base_offset_bytes(12288);
std::unique_ptr<BufferAssignment> isolation_assignment =
RunBufferAssignmentWithIsolationOptions(m.get(), isolation_options);
auto isolation_allocation =
absl::c_find_if(isolation_assignment->Allocations(),
[](const BufferAllocation& allocation) {
return allocation.IsPreallocatedTempBuffer();
});
ASSERT_NE(isolation_allocation, isolation_assignment->Allocations().end());
std::vector<const HloValue*> ordered_values;
for (const auto& [value, _] : isolation_allocation->assigned_buffers()) {
ordered_values.push_back(value);
}
absl::c_sort(ordered_values, isolation_options.hlo_value_compare);
int i;
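// Isolated buffers should be stacked after the non-isolation temp
// allocation, shifted by base_offset_bytes and separated by
// isolation_padding_bytes, for as many values as the isolation fuel allows.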
int64_t expected_offset = nonisolation_allocation->size() +
isolation_options.config.base_offset_bytes() +
isolation_options.config.isolation_padding_bytes();
ASSERT_GT(ordered_values.size(), isolation_options.config.isolation_fuel());
LOG(INFO) << "Isolation buffers";
for (i = 0; i < isolation_options.config.isolation_fuel(); ++i) {
const HloValue* value = ordered_values[i];
auto offset_size = isolation_allocation->assigned_buffers().at(value);
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
EXPECT_EQ(offset_size.offset, expected_offset);
expected_offset +=
offset_size.size + isolation_options.config.isolation_padding_bytes();
}
for (; i < ordered_values.size(); ++i) {
const HloValue* value = ordered_values[i];
auto offset_size = isolation_allocation->assigned_buffers().at(value);
auto nonisolation_offset_size = absl::c_find_if(
nonisolation_allocation->assigned_buffers(), [&](const auto& pair) {
return pair.first->defining_position() == value->defining_position();
});
ASSERT_NE(nonisolation_offset_size,
nonisolation_allocation->assigned_buffers().end());
LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset
<< ", size: " << offset_size.size;
EXPECT_EQ(offset_size.offset,
nonisolation_offset_size->second.offset +
isolation_options.config.base_offset_bytes());
}
}
TEST_F(BufferAssignmentTest, BufferInfoStringTest) {
absl::string_view module_str = R"(
HloModule test_module
ENTRY %test_module {
%param.0 = s32[1024]{0} parameter(0)
%param.1 = s32[1024]{0} parameter(1)
%mul = s32[1024]{0} multiply(%param.0, %param.1)
%add = s32[1024]{0} add(%mul, %param.0)
ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[1024] %add), dimensions={0}
})";
absl::string_view reference_str =
R"(buffer_id,buffer_name,offset,size,definition_time,end_time,num_uses,use_times,use_names
0,"<0 param.0 @0>",0,4096,0,5,2,"2;3","mul, operand 0;add, operand 1"
1,"<1 param.1 @0>",0,4096,1,5,1,"2","mul, operand 1"
2,"<2 mul @0>",0,4096,2,3,1,"3","add, operand 0"
3,"<3 add @0>",0,4096,3,4,1,"4","bcast, operand 0"
4,"<4 bcast @0>",0,4194304,4,5,0,"",""
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
HloInstruction* const param0 = FindInstruction(m.get(), "param.0");
HloInstruction* const param1 = FindInstruction(m.get(), "param.1");
HloInstruction* const mul = FindInstruction(m.get(), "mul");
HloInstruction* const add = FindInstruction(m.get(), "add");
HloInstruction* const bcast = FindInstruction(m.get(), "bcast");
auto assignment = RunBufferAssignmentWithInstructionSequence(
m.get(), {param0, param1, mul, add, bcast});
const std::string buffer_info_str = assignment->BufferInfoString();
EXPECT_EQ(buffer_info_str, reference_str);
}
TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto input1 = builder.AddInstruction(
HloInstruction::CreateParameter(2, data_shape_, "input1"));
auto weights1 = builder.AddInstruction(
HloInstruction::CreateParameter(3, data_shape_, "weights1"));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {}));
auto cond =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body = module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({input1, weights1, output1}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple0));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple1));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while1, 1));
auto root_add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape_, HloOpcode::kAdd, gte0, gte1));
module->AddEntryComputation(builder.Build());
{
FlattenCallGraph flatten;
TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));
EXPECT_TRUE(result);
}
RunCopyInsertion(module.get());
HloSchedule schedule = ScheduleModule(module.get(), ByteSizeOf).value();
schedule.set_sequence(
module->entry_computation(),
{input1, weights1, one, output1, while1->mutable_operand(0), while1,
input0, weights0, zero, output0, while0->mutable_operand(0), while0,
gte0, gte1, root_add});
TF_ASSERT_OK(schedule.Verify());
auto assignment =
BufferAssigner::Run(
module.get(), std::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
/*allocate_buffers_for_constants=*/true)
.value();
EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));
}
TEST_F(WhileBufferAssignmentTest, WhilesDontShareEntryParamIfLiveOut) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("entry");
auto input0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "input0"));
auto weights0 = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "weights0"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto output0 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto output1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, zero, {}));
auto cond0 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body0 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple0 = builder.AddInstruction(
HloInstruction::CreateTuple({input0, weights0, output0}));
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));
auto while0_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));
auto cond1 =
module->AddEmbeddedComputation(BuildWhileConditionComputation("cond"));
auto body1 =
module->AddEmbeddedComputation(BuildWhileBodyComputation("body"));
auto tuple1 = builder.AddInstruction(
HloInstruction::CreateTuple({while0_out, weights0, output1}));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));
auto while1_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, while1, 2));
module->AddEntryComputation(builder.Build());
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
auto* root_alloc =
assignment->GetUniqueTopLevelSlice(while1_out).value().allocation();
EXPECT_TRUE(root_alloc->maybe_live_out());
EXPECT_FALSE(root_alloc->is_entry_computation_parameter());
}
TEST_F(WhileBufferAssignmentTest, WhileWithDynamicUpdateSliceShare) {
const char* const hlo_string = R"(
HloModule test
while_body {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
while_condition {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
get-tuple-element = s32[] get-tuple-element(state), index=0
get-tuple-element.1 = s32[] constant(3)
ROOT less-than.339.338 = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT
}
ENTRY entry_computation {
constant.7 = s32[] constant(0)
copy.1 = s32[] copy(constant.7)
constant.6 = f32[] constant(0)
broadcast.6 = f32[1280,1,128]{2,1,0} broadcast(constant.6), dimensions={}
tuple.1 = (s32[], f32[1280,1,128]{2,1,0}) tuple(copy.1, broadcast.6)
while.0 = (s32[], f32[1280,1,128]{2,1,0}) while(tuple.1), condition=while_condition, body=while_body
ROOT get-tuple-element.2 = s32[] get-tuple-element(while.0), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
RunCopyInsertion(module.get());
auto assignment = RunBufferAssignment(module.get());
auto dus9 = FindInstruction(module.get(), "dynamic-update-slice.9");
auto dus9_alloc_slice = assignment->GetUniqueTopLevelSlice(dus9).value();
auto dus5 = FindInstruction(module.get(), "dynamic-update-slice.5");
auto dus5_alloc_slice = assignment->GetUniqueTopLevelSlice(dus5).value();
EXPECT_EQ(dus9_alloc_slice.allocation(), dus5_alloc_slice.allocation());
EXPECT_EQ(dus9_alloc_slice, dus5_alloc_slice);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
464b7a60-847b-4ac6-bf92-ce88e1c0c36b | cpp | tensorflow/tensorflow | conditional_canonicalizer | third_party/xla/xla/service/conditional_canonicalizer.cc | third_party/xla/xla/service/conditional_canonicalizer_test.cc | #include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
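// Canonicalizes a conditional with array-shaped branch roots: wraps each
// branch root in a one-element tuple, clones the conditional with the new
// tuple shape, and restores the original result via get-tuple-element.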
absl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) {
TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);
for (auto* branch : conditional->called_computations()) {
HloInstruction* root = branch->root_instruction();
TF_RET_CHECK(!root->shape().IsTuple());
HloInstruction* tuple =
branch->AddInstruction(HloInstruction::CreateTuple({root}));
branch->set_root_instruction(tuple, /*accept_different_shape=*/true);
}
auto parent = conditional->parent();
const Shape& root_shape = conditional->shape();
auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1));
auto new_conditional =
parent->AddInstruction(conditional->CloneWithNewShape(new_shape));
auto gte = parent->AddInstruction(
HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0));
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte));
return absl::OkStatus();
}
}
absl::StatusOr<bool> ConditionalCanonicalizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kConditional &&
!inst->shape().IsTuple()) {
TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst));
changed = true;
}
}
}
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ConditionalCanonicalizerTest : public HloTestBase {
protected:
ConditionalCanonicalizerTest() = default;
};
TEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
true_branch {
true_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(0)
}
false_branch {
false_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(1)
}
ENTRY entry {
param0 = s32[3,2] parameter(0)
branch = pred[] constant(false)
param_tuple = (s32[3 ,2]) tuple(param0)
ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple),
true_computation=true_branch, false_computation=false_branch
}
)")
.value();
ConditionalCanonicalizer pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Conditional()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
62bf0644-5132-4e87-bff3-4b0982acdd07 | cpp | tensorflow/tensorflow | infeed_token_propagation | third_party/xla/xla/service/infeed_token_propagation.cc | third_party/xla/xla/service/infeed_token_propagation_test.cc | #include "xla/service/infeed_token_propagation.h"
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
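// An infeed is "dangling" if it is unsharded, its input token is an
// operandless after-all, and no user reads its output token (tuple index 1).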
bool IsDanglingInfeed(HloInstruction* infeed) {
CHECK(infeed->opcode() == HloOpcode::kInfeed);
if (infeed->has_sharding()) {
return false;
}
if (const HloInstruction* after_all = infeed->operand(0);
after_all->opcode() != HloOpcode::kAfterAll ||
after_all->operand_count() != 0) {
return false;
}
for (const HloInstruction* user : infeed->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() == 1) {
return false;
}
}
return true;
}
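// An outfeed is "dangling" if it is unsharded, its input token is an
// operandless after-all, and its result token has no users.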
bool IsDanglingOutfeed(HloInstruction* outfeed) {
CHECK(outfeed->opcode() == HloOpcode::kOutfeed);
if (outfeed->has_sharding()) {
return false;
}
if (const HloInstruction* after_all = outfeed->operand(1);
after_all->opcode() != HloOpcode::kAfterAll ||
after_all->operand_count() != 0) {
return false;
}
if (outfeed->user_count() != 0) {
return false;
}
return true;
}
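// Rebuilds `tuple` from per-element get-tuple-elements, yielding a fresh
// tuple instruction with identical contents.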
HloInstruction* ReconstructTuple(HloInstruction* tuple) {
CHECK(tuple->shape().IsTuple());
HloComputation* computation = tuple->parent();
std::vector<HloInstruction*> gtes;
gtes.resize(tuple->shape().tuple_shapes_size());
for (int64_t idx = 0; idx < gtes.size(); ++idx) {
gtes[idx] = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(tuple, idx));
}
return computation->AddInstruction(HloInstruction::CreateTuple(gtes));
}
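// Appends a token to the shape of `tuple`. Existing users keep the original
// shape through a reconstructed copy; if `add_token_operand` is set, a fresh
// token is also appended as an operand. Returns a get-tuple-element reading
// the new token element.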
absl::StatusOr<HloInstruction*> InsertTokenIntoTuple(HloInstruction* tuple,
bool add_token_operand) {
CHECK(tuple->shape().IsTuple());
HloComputation* computation = tuple->parent();
std::vector<HloInstruction*> original_users = tuple->users();
HloInstruction* original_tuple = ReconstructTuple(tuple);
for (HloInstruction* original_user : original_users) {
for (int64_t idx : original_user->operand_indices(tuple)) {
TF_RETURN_IF_ERROR(
original_user->ReplaceOperandWith(idx, original_tuple));
}
}
*tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape();
if (add_token_operand) {
tuple->AppendOperand(
computation->AddInstruction(HloInstruction::CreateToken()));
}
HloInstruction* input_token_gte =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
tuple, tuple->shape().tuple_shapes_size() - 1));
return input_token_gte;
}
}
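// Ensures a conditional is tuple-shaped throughout: branch parameters,
// branch operands, and branch roots all become tuples, and a root
// conditional is shielded behind a reconstructed tuple so it can be extended.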
absl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) {
CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
for (HloComputation* branch : conditional->branch_computations()) {
HloInstruction* parameter = branch->parameter_instruction(0);
if (!parameter->shape().IsTuple()) {
*parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({parameter->shape()});
HloInstruction* original = branch->AddInstruction(
HloInstruction::CreateGetTupleElement(parameter, 0));
TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original));
}
int64_t branch_operand_idx = conditional->branch_index(branch) + 1;
HloInstruction* branch_tuple =
conditional->mutable_operand(branch_operand_idx);
if (!branch_tuple->shape().IsTuple()) {
branch_tuple = conditional->parent()->AddInstruction(
HloInstruction::CreateTuple({branch_tuple}));
TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape(
branch_operand_idx, branch_tuple));
}
if (branch_tuple->opcode() == HloOpcode::kParameter) {
branch_tuple = ReconstructTuple(branch_tuple);
TF_RETURN_IF_ERROR(
conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple));
}
HloInstruction* root = branch->root_instruction();
if (root->opcode() != HloOpcode::kTuple) {
root = ReconstructTuple(root);
branch->set_root_instruction(root);
}
}
CHECK(conditional->shape().IsTuple());
if (conditional->IsRoot()) {
HloInstruction* new_root = ReconstructTuple(conditional);
conditional->parent()->set_root_instruction(new_root);
}
return absl::OkStatus();
}
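// Ensures a while loop is tuple-shaped throughout: the body and condition
// parameters, the body root, the init operand, and the loop result all
// become tuples that can later absorb a token element.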
absl::Status CanonicalizeWhileInstruction(HloInstruction* loop) {
CHECK_EQ(loop->opcode(), HloOpcode::kWhile);
HloComputation* body = loop->while_body();
HloComputation* cond = loop->while_condition();
HloInstruction* body_parameter = body->parameter_instruction(0);
if (!body_parameter->shape().IsTuple()) {
*body_parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({body_parameter->shape()});
HloInstruction* original = body->AddInstruction(
HloInstruction::CreateGetTupleElement(body_parameter, 0));
TF_RETURN_IF_ERROR(
body_parameter->ReplaceAllUsesWithDifferentShape(original));
}
HloInstruction* root = body->root_instruction();
if (!root->shape().IsTuple()) {
root = body->AddInstruction(HloInstruction::CreateTuple({root}));
body->set_root_instruction(root, /*accept_different_shape=*/true);
}
HloInstruction* cond_parameter = cond->parameter_instruction(0);
if (!cond_parameter->shape().IsTuple()) {
*cond_parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({cond_parameter->shape()});
HloInstruction* original = cond->AddInstruction(
HloInstruction::CreateGetTupleElement(cond_parameter, 0));
TF_RETURN_IF_ERROR(
cond_parameter->ReplaceAllUsesWithDifferentShape(original));
}
if (!loop->shape().IsTuple()) {
*loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()});
HloInstruction* original = loop->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(loop, 0));
TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original));
}
HloInstruction* loop_tuple = loop->mutable_operand(0);
if (!loop_tuple->shape().IsTuple()) {
loop_tuple = loop->parent()->AddInstruction(
HloInstruction::CreateTuple({loop_tuple}));
TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple));
}
if (loop_tuple->opcode() == HloOpcode::kParameter) {
loop_tuple = ReconstructTuple(loop_tuple);
TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple));
}
if (root->opcode() != HloOpcode::kTuple) {
root = ReconstructTuple(root);
body->set_root_instruction(root);
}
if (loop->IsRoot()) {
HloInstruction* new_root = ReconstructTuple(loop);
loop->parent()->set_root_instruction(new_root);
}
return absl::OkStatus();
}
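// Pushes the dangling token one level up through the enclosing conditional:
// every branch root, the dangling branch's parameter, the matching branch
// operand, and the conditional result each gain a trailing token, and
// input_token_/output_token_ are updated for the next hop.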
absl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() {
HloComputation* comp = dangling_instruction_->parent();
dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];
CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional);
for (HloComputation* branch : dangling_instruction_->branch_computations()) {
HloInstruction* root = branch->root_instruction();
if (branch == comp) {
TF_RETURN_IF_ERROR(
    InsertTokenIntoTuple(root, /*add_token_operand=*/false).status());
root->AppendOperand(output_token_);
} else {
TF_RETURN_IF_ERROR(
    InsertTokenIntoTuple(root, /*add_token_operand=*/true).status());
}
}
HloInstruction* parameter = comp->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(
    HloInstruction * input_token_gte,
    InsertTokenIntoTuple(parameter, /*add_token_operand=*/false));
TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));
int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1;
HloInstruction* branch_tuple =
dangling_instruction_->mutable_operand(branch_operand_idx);
TF_ASSIGN_OR_RETURN(
    HloInstruction * next_input_token_gte,
    InsertTokenIntoTuple(branch_tuple, /*add_token_operand=*/true));
TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape(
branch_operand_idx, branch_tuple));
input_token_ =
branch_tuple->mutable_operand(next_input_token_gte->tuple_index());
TF_ASSIGN_OR_RETURN(
    output_token_,
    InsertTokenIntoTuple(dangling_instruction_, /*add_token_operand=*/false));
return absl::OkStatus();
}
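// Pushes the dangling token one level up through the enclosing while loop:
// the body root, the body and condition parameters, the init tuple, and the
// loop result each gain a trailing token.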
absl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() {
HloComputation* comp = dangling_instruction_->parent();
dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];
CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile);
HloInstruction* root = comp->root_instruction();
TF_RETURN_IF_ERROR(
    InsertTokenIntoTuple(root, /*add_token_operand=*/false).status());
root->AppendOperand(output_token_);
HloInstruction* body_parameter = comp->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(
    HloInstruction * input_token_gte,
    InsertTokenIntoTuple(body_parameter, /*add_token_operand=*/false));
TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));
HloComputation* cond = dangling_instruction_->while_condition();
HloInstruction* cond_parameter = cond->parameter_instruction(0);
TF_RETURN_IF_ERROR(
    InsertTokenIntoTuple(cond_parameter, /*add_token_operand=*/false)
        .status());
HloInstruction* while_tuple = dangling_instruction_->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
    input_token_,
    InsertTokenIntoTuple(while_tuple, /*add_token_operand=*/true));
TF_RETURN_IF_ERROR(
dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple));
TF_ASSIGN_OR_RETURN(
    output_token_,
    InsertTokenIntoTuple(dangling_instruction_, /*add_token_operand=*/false));
return absl::OkStatus();
}
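// Walks up the (flattened) call graph from the dangling instruction,
// canonicalizing and token-threading each enclosing conditional branch or
// while body until the entry computation, a sharded caller, or an unhandled
// construct is reached.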
absl::Status InfeedTokenPropagation::PropagateToken() {
HloComputation* comp = dangling_instruction_->parent();
if (comp->IsEntryComputation()) {
return absl::OkStatus();
}
VLOG(2) << "Propagating tokens for: " << dangling_instruction_->name();
HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0];
if (caller->has_sharding()) {
return absl::OkStatus();
}
if (caller->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller));
TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch());
} else if (caller->opcode() == HloOpcode::kWhile &&
comp == caller->while_body()) {
TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller));
TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody());
} else {
VLOG(2) << "Unhandled computation: " << comp->name();
return absl::OkStatus();
}
return PropagateToken();
}
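// Pass entry point: collects dangling infeeds/outfeeds outside the entry
// computation, threads their tokens up to the entry, and cleans up with
// TupleSimplifier and HloDCE.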
absl::StatusOr<bool> InfeedTokenPropagation::Run(
HloModule* module,
const absl::flat_hash_set<std::string_view>& execution_threads) {
VLOG(5) << "Before InfeedTokenPropagation:";
XLA_VLOG_LINES(5, module->ToString());
std::vector<HloInstruction*> dangling_infeeds;
std::vector<HloInstruction*> dangling_outfeeds;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
if (!computation->IsEntryComputation()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kInfeed &&
IsDanglingInfeed(instruction)) {
VLOG(1) << "Found dangling infeed: " << instruction->ToString();
dangling_infeeds.push_back(instruction);
} else if (instruction->opcode() == HloOpcode::kOutfeed &&
IsDanglingOutfeed(instruction)) {
VLOG(1) << "Found dangling outfeed: " << instruction->ToString();
dangling_outfeeds.push_back(instruction);
}
}
}
}
bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty();
if (changed) {
call_graph_ = CallGraph::Build(module);
if (!call_graph_->IsFlattened()) {
return FailedPrecondition(
"Call graph must be flattened before infeed token propagation.");
}
}
for (HloInstruction* dangling_infeed : dangling_infeeds) {
dangling_instruction_ = dangling_infeed;
input_token_ = dangling_infeed->mutable_operand(0);
output_token_ = dangling_infeed->AddInstruction(
HloInstruction::CreateGetTupleElement(dangling_infeed, 1));
TF_RETURN_IF_ERROR(PropagateToken());
}
for (HloInstruction* dangling_outfeed : dangling_outfeeds) {
dangling_instruction_ = dangling_outfeed;
input_token_ = dangling_outfeed->mutable_operand(1);
output_token_ = dangling_outfeed;
TF_RETURN_IF_ERROR(PropagateToken());
}
if (changed) {
TF_RETURN_IF_ERROR(
TupleSimplifier().Run(module, execution_threads).status());
TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
}
VLOG(5) << "After InfeedTokenPropagation:";
XLA_VLOG_LINES(5, module->ToString());
return changed;
}
} | #include "xla/service/infeed_token_propagation.h"
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class InfeedTokenPropagationTest : public HloTestBase {
protected:
InfeedTokenPropagationTest() = default;
};
TEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
ENTRY main {
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT gte.0 = get-tuple-element(infeed.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
ENTRY main {
arg.0 = s32[] parameter(0)
tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])
ROOT tuple.1 = tuple()
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(InfeedTokenPropagationTest, ConditionalInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
true_tuple.0 = tuple()
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
arg.0 = s32[] parameter(0)
pred.0 = pred[] constant(true)
true_tuple.0 = tuple(arg.0)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
const HloInstruction* true_tuple = cond->operand(1);
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
const HloInstruction* false_tuple = cond->operand(2);
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, NonTupleConditional) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = s32[] parameter(0)
outfeed_tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
arg.0 = s32[] parameter(0)
pred.0 = pred[] constant(true)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = cond->mutable_operand(1);
EXPECT_TRUE(true_tuple->shape().IsTuple());
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
ROOT arg.0 = () parameter(0)
one.0 = s32[] constant(1)
outfeed_tuple.0 = tuple(one.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
true_tuple.0 = tuple()
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, WhileInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());
}
TEST_F(InfeedTokenPropagationTest, WhileOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
gte.0 = get-tuple-element(arg.0), index=0
ROOT tuple.0 = tuple(gte.0)
}
cond {
arg.0 = (s32[]) parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
arg.0 = s32[] parameter(0)
while_tuple.0 = tuple(arg.0)
ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(), op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());
}
TEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
ROOT arg.0 = () parameter(0)
one.0 = s32[] constant(1)
outfeed_tuple.0 = tuple(one.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());
}
TEST_F(InfeedTokenPropagationTest, NonTupleWhile) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
ROOT arg.0 = s32[] parameter(0)
tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])
}
cond {
arg.0 = s32[] parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
arg.0 = s32[] parameter(0)
ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_TRUE(loop->shape().IsTuple());
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll()));
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(), op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());
}
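// Tokens should propagate through nested control flow: an outfeed inside a
// conditional that itself sits inside a while body threads tokens out through
// both levels.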
TEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
gte.0 = get-tuple-element(infeed.0), index=0
pred.0 = pred[] constant(true)
true_tuple.0 = tuple(gte.0)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1),
op::GetTupleElement(op::Conditional(), 0)));
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9a26e45-6caf-4a8e-84d6-4f99d3d89f0a | cpp | tensorflow/tensorflow | while_loop_constant_sinking | third_party/xla/xla/service/while_loop_constant_sinking.cc | third_party/xla/xla/service/while_loop_constant_sinking_test.cc | #include "xla/service/while_loop_constant_sinking.h"
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "xla/service/while_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace {
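// Replaces all uses of `old_instr` with `new_instr`, except for the use at
// `tuple_index` in the while-body root: keeping that use intact preserves the
// loop invariance of the corresponding tuple element.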
absl::Status ReplaceUsesWhileKeepingLoopInvariance(
HloInstruction* old_instr, HloInstruction* new_instr,
HloInstruction* while_body_root, int64_t tuple_index) {
CHECK_EQ(while_body_root->opcode(), HloOpcode::kTuple);
std::vector<HloInstruction*> users;
users.reserve(old_instr->user_count());
absl::c_copy(old_instr->users(), std::back_inserter(users));
for (auto* user : users) {
for (int64_t i = 0, e = user->operand_count(); i < e; i++) {
if (user->operand(i) == old_instr &&
!(user == while_body_root && i == tuple_index)) {
TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, new_instr));
}
}
}
return absl::OkStatus();
}
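// Clones a constant, or a broadcast of a constant, into `computation`. Any
// other opcode is a logic error (LOG(FATAL)): these are the only instruction
// shapes the pass ever sinks.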
HloInstruction* CloneHelper(const HloInstruction* instruction,
HloComputation* computation) {
if (instruction->opcode() == HloOpcode::kConstant) {
return computation->AddInstruction(instruction->Clone(".sunk"));
}
if (instruction->opcode() == HloOpcode::kBroadcast) {
return computation->AddInstruction(instruction->CloneWithNewOperands(
instruction->shape(),
{CloneHelper(instruction->operand(0), computation)}));
}
LOG(FATAL) << "Unexpected instruction.";
}
}  // namespace
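// Sinks constants (and, when enabled, broadcasts of constants) that feed a
// while loop's init tuple into the loop's body and condition. Roughly
// (sketch):
//
//   init = tuple(c, ...)                      // c = constant
//   while(init):  v = gte(arg, 0); use(v)
// becomes
//   while(init):  c.sunk = constant(...); use(c.sunk)
//
// The body root's pass-through use of the GTE is left untouched so the tuple
// element remains loop-invariant.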
absl::StatusOr<bool> WhileLoopConstantSinking::TrySinkingConstantsIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
const HloInstruction& init_value = *while_instr->operand(0);
if (init_value.opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
conditional_gte_index_to_insts =
WhileUtil::GetGTEsMapForWhileConditional(*while_cond);
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
const HloInstruction& invariant_value = *init_value.operand(index);
if (invariant_value.opcode() != HloOpcode::kConstant &&
(!sink_broadcast_of_constants_ ||
invariant_value.opcode() != HloOpcode::kBroadcast ||
invariant_value.operand(0)->opcode() != HloOpcode::kConstant)) {
continue;
}
if (sink_only_scalar_constants_) {
if (!ShapeUtil::IsScalar(init_value.operand(index)->shape())) {
continue;
}
}
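    // The invariant GTE always keeps its use in the body root (that use is
    // what makes the element invariant), so only sink a clone of the constant
    // when there are additional users to redirect.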
if (invariant_body_gte->user_count() > 1) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_body);
TF_RETURN_IF_ERROR(ReplaceUsesWhileKeepingLoopInvariance(
invariant_body_gte, constant_instr, while_body->root_instruction(),
index));
changed = true;
}
auto it = conditional_gte_index_to_insts.find(index);
if (it == conditional_gte_index_to_insts.end()) {
continue;
}
for (HloInstruction* invariant_cond_gte : it->second) {
if (invariant_cond_gte->user_count() > 0) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_cond);
TF_RETURN_IF_ERROR(
invariant_cond_gte->ReplaceAllUsesWith(constant_instr));
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> WhileLoopConstantSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before WhileLoopConstantSinking:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(bool result,
TrySinkingConstantsIntoWhileLoop(while_instr));
changed |= result;
}
if (changed) {
VLOG(2) << "HLO module after WhileLoopConstantSinking:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after WhileLoopConstantSinking";
}
return changed;
}
}  // namespace xla | #include "xla/service/while_loop_constant_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopConstantSinkingTest = HloTestBase;
TEST_F(WhileLoopConstantSinkingTest, SinkOneConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false,
                               /*sink_only_scalar_constants=*/true)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
      changed, WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false,
                                        /*sink_only_scalar_constants=*/false)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Constant()), _));
}
TEST_F(WhileLoopConstantSinkingTest, SinkBroadcastOfConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[16],f32[16]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[16],f32[16]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[] constant(1)
const_1 = f32[] constant(2)
broadcast_0 = f32[16] broadcast(const_0), dimensions={}
broadcast_1 = f32[16] broadcast(const_1), dimensions={}
while_init = tuple(broadcast_0, broadcast_1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/false)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
      changed, WhileLoopConstantSinking(/*sink_broadcast_of_constants=*/true)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Broadcast(op::Constant())), _));
}
TEST_F(WhileLoopConstantSinkingTest, KeepConstantsLoopInvariant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=1
p_body.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=2
add.0 = f32[2] add(p_body.1, p_body.2)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_body.1, p_body.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(op::Constant(), op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, TupleShapedConstants) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],(f32[2],f32[2])) parameter(0)
p_b.0 = f32[2] get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=0
p_b.1 = (f32[2],f32[2]) get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=1
p_b.1.1 = f32[2] get-tuple-element(p_b.1), index=0
ROOT root = (f32[2],(f32[2],f32[2])) tuple(p_b.1.1, p_b.1)
}
condition {
p_cond = (f32[2],(f32[2],f32[2])) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = (f32[2], f32[2]) constant(({2, 1},{3,1}))
while_init = (f32[2],(f32[2],f32[2])) tuple(const_0, const_1)
ROOT while = (f32[2],(f32[2],f32[2])) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(op::Constant(), 0),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, DuplicateGTEs) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],f32[2],f32[2]) parameter(0)
p_b.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=1
p_b.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
p_b.2.dup = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
add.0 = f32[2] add(p_b.1, p_b.2.dup)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_b.1, p_b.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(op::Constant(), ::testing::Not(op::Constant())),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
token0 = token[] after-all()
outfeed = token[] outfeed(p_body.0, token0)
ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition,
body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::GetTupleElement()));
for (const HloInstruction* inst : while_body->instructions()) {
if (inst->opcode() == HloOpcode::kConstant) {
EXPECT_GT(inst->user_count(), 0);
}
}
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalSinkConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[]) p_body), index=1
ROOT root = (f32[],f32[]) tuple(add, p_body.1)
}
condition {
p_cond = (f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=1
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
while_init = (f32[],f32[]) tuple(const_0, const_1)
ROOT while = (f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalTupleShapedConstants) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[],(f32[],f32[])) parameter(0)
p_b.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_b), index=0
p_b.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_b), index=1
p_b.1.0 = f32[] get-tuple-element((f32[],f32[]) p_b.1), index=0
add = f32[] add(p_b.0, p_b.1.0)
ROOT root = (f32[],(f32[],f32[])) tuple(add, p_b.1)
}
condition {
p_c = (f32[],(f32[],f32[])) parameter(0)
p_c.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_c), index=0
p_c.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_c), index=1
p_c.1.1 = f32[] get-tuple-element((f32[],f32[]) p_c.1), index=1
ROOT result = pred[] compare(p_c.0, p_c.1.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = (f32[], f32[]) constant((1, 10))
while_init = (f32[],(f32[],f32[])) tuple(const_0, const_1)
ROOT while = (f32[],(f32[],f32[])) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(),
op::Lt(_, op::GetTupleElement(op::Constant())));
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalDontCreateDeadConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add, p_body.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
for (const HloInstruction* inst : while_condition->instructions()) {
if (inst->opcode() == HloOpcode::kConstant) {
EXPECT_GT(inst->user_count(), 0);
}
}
}
TEST_F(WhileLoopConstantSinkingTest, ConditionalMultipleSameIndexGTEs) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add.0 = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
add.1 = f32[] add(p_body.1, const)
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add.0, add.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.0 = pred[] compare(p_cond.0, p_cond.2), direction=LT
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2.c = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.1 = pred[] compare(p_cond.1, p_cond.2.c), direction=LT
ROOT result = pred[] and(lt.0, lt.1)
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(0)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConstantSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_condition = module->GetComputationWithName("condition");
EXPECT_THAT(while_condition->root_instruction(),
op::And(op::Lt(_, op::Constant()), op::Lt(_, op::Constant())));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9131b767-6a50-48e5-8e87-4d8d59db6611 | cpp | tensorflow/tensorflow | rendezvous | tensorflow/core/framework/rendezvous.cc | tensorflow/core/framework/rendezvous_test.cc | #include "tensorflow/core/framework/rendezvous.h"
#include <deque>
#include <functional>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/local_rendezvous.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/manual_constructor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
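// Deep-copies `b`, rebasing the StringPiece members so they point into this
// object's own buffer rather than into `b`'s.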
Rendezvous::ParsedKey& Rendezvous::ParsedKey::operator=(const ParsedKey& b) {
const char* b_base = b.buf_.data();
buf_ = b.buf_;
src_device = StringPiece(buf_.data() + (b.src_device.data() - b_base),
b.src_device.size());
src = b.src;
src_incarnation = b.src_incarnation;
dst_device = StringPiece(buf_.data() + (b.dst_device.data() - b_base),
b.dst_device.size());
dst = b.dst;
edge_name = StringPiece(buf_.data() + (b.edge_name.data() - b_base),
b.edge_name.size());
return *this;
}
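// Builds a rendezvous key of the form
//   src_device;hex(src_incarnation);dst_device;name;frame_id:iter_id
// ParseKey below splits a string of this form back into a ParsedKey.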
string Rendezvous::CreateKey(const string& src_device, uint64 src_incarnation,
const string& dst_device, const string& name,
const FrameAndIter& frame_iter) {
char buf[strings::kFastToBufferSize];
return strings::StrCat(
src_device, ";", strings::Uint64ToHexString(src_incarnation, buf), ";",
dst_device, ";", name, ";", frame_iter.frame_id, ":", frame_iter.iter_id);
}
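// Returns the prefix of `*s` up to (but not including) the first `delim`,
// consuming it from `*s`; if `delim` is absent, consumes and returns all of
// `*s`.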
static StringPiece ConsumeNextPart(StringPiece* s, char delim) {
for (size_t offset = 0; offset < s->size(); offset++) {
if ((*s)[offset] == delim) {
StringPiece result(s->data(), offset);
s->remove_prefix(offset + 1);
return result;
}
}
StringPiece result(s->data(), s->size());
s->remove_prefix(s->size());
return result;
}
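// Parses a key in the format produced by CreateKey. The StringPiece fields of
// `out` alias out->buf_, so the parsed key owns its backing storage.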
Status Rendezvous::ParseKey(StringPiece key, ParsedKey* out) {
if (key.data() == out->buf_.data()) {
DCHECK_EQ(key.size(), out->buf_.size());
} else {
out->buf_.assign(key.data(), key.size());
}
StringPiece s(out->buf_);
StringPiece parts[5];
for (int i = 0; i < 5; i++) {
parts[i] = ConsumeNextPart(&s, ';');
}
if (s.empty() &&
!parts[4].empty() &&
DeviceNameUtils::ParseFullName(parts[0], &out->src) &&
strings::HexStringToUint64(parts[1], &out->src_incarnation) &&
DeviceNameUtils::ParseFullName(parts[2], &out->dst) &&
!parts[3].empty()) {
out->src_device = StringPiece(parts[0].data(), parts[0].size());
out->dst_device = StringPiece(parts[2].data(), parts[2].size());
out->edge_name = StringPiece(parts[3].data(), parts[3].size());
return absl::OkStatus();
}
return errors::InvalidArgument("Invalid rendezvous key: ", key);
}
RendezvousInterface::~RendezvousInterface() {}
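// Synchronous Recv: wraps RecvAsync with a Notification and blocks until the
// value arrives, or fails with DEADLINE_EXCEEDED if `timeout_ms` > 0 and the
// wait times out.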
Status RendezvousInterface::Recv(const ParsedKey& key, const Args& recv_args,
Tensor* val, bool* is_dead,
int64_t timeout_ms) {
Status ret;
Notification n;
RecvAsync(key, recv_args,
[&ret, &n, val, is_dead](const Status& s, const Args& send_args,
const Args& recv_args, const Tensor& v,
const bool dead) {
ret = s;
*val = v;
*is_dead = dead;
n.Notify();
});
if (timeout_ms > 0) {
int64_t timeout_us = timeout_ms * 1000;
bool notified = WaitForNotificationWithTimeout(&n, timeout_us);
if (!notified) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Timed out waiting for notification");
}
} else {
n.WaitForNotification();
}
return ret;
}
Status RendezvousInterface::Recv(const ParsedKey& key, const Args& args,
Tensor* val, bool* is_dead) {
const int64_t no_timeout = 0;
return Recv(key, args, val, is_dead, no_timeout);
}
namespace {
class LocalRendezvousWrapper : public Rendezvous {
public:
  explicit LocalRendezvousWrapper(int num_shards)
      : impl_(this, num_shards) {}
Status Send(const ParsedKey& key, const Args& send_args, const Tensor& val,
const bool is_dead) override {
return impl_.Send(key, send_args, val, is_dead);
}
void RecvAsync(const ParsedKey& key, const Args& recv_args,
DoneCallback done) override {
impl_.RecvAsync(key, recv_args, std::move(done));
}
void StartAbort(const Status& status) override { impl_.StartAbort(status); }
private:
LocalRendezvous impl_;
LocalRendezvousWrapper(const LocalRendezvousWrapper&) = delete;
void operator=(const LocalRendezvousWrapper&) = delete;
};
}  // namespace
Rendezvous* NewLocalRendezvous(int num_shards) {
return new LocalRendezvousWrapper(num_shards);
}
}  // namespace tensorflow | #include "tensorflow/core/framework/rendezvous.h"
#include "absl/status/status.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
TEST(RendezvousTest, Key) {
const string key = Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/CPU:0", 7890,
"/job:mnist/replica:1/task:2/device:GPU:0", "var0", FrameAndIter(0, 0));
EXPECT_EQ(key,
"/job:mnist/replica:1/task:2/CPU:0;"
"0000000000001ed2;"
"/job:mnist/replica:1/task:2/device:GPU:0;"
"var0;"
"0:0");
Rendezvous::ParsedKey parsed;
TF_EXPECT_OK(Rendezvous::ParseKey(key, &parsed));
EXPECT_EQ(parsed.src_device, "/job:mnist/replica:1/task:2/CPU:0");
EXPECT_EQ(parsed.src_incarnation, 7890);
EXPECT_EQ(parsed.src.type, "CPU");
EXPECT_EQ(parsed.dst_device, "/job:mnist/replica:1/task:2/device:GPU:0");
EXPECT_EQ(parsed.dst.type, "GPU");
EXPECT_FALSE(Rendezvous::ParseKey("foo;bar;baz", &parsed).ok());
EXPECT_FALSE(Rendezvous::ParseKey("/job:mnist/replica:1/task:2/CPU:0;"
"/job:mnist/replica:1/task:2/device:GPU:0;",
&parsed)
.ok());
EXPECT_FALSE(
Rendezvous::ParseKey(strings::StrCat(key, ";", key), &parsed).ok());
}
class LocalRendezvousTest : public ::testing::Test {
public:
LocalRendezvousTest() : threads_(Env::Default(), "test", 16) {
rendez_ = NewLocalRendezvous();
}
~LocalRendezvousTest() override { rendez_->Unref(); }
void SchedClosure(std::function<void()> fn) {
threads_.Schedule(std::move(fn));
}
Rendezvous* rendez_;
private:
thread::ThreadPool threads_;
};
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& name) {
string s = Rendezvous::CreateKey("/job:mnist/replica:1/task:2/CPU:0", 7890,
"/job:mnist/replica:1/task:2/device:GPU:0",
name, FrameAndIter(0, 0));
Rendezvous::ParsedKey k;
TF_EXPECT_OK(Rendezvous::ParseKey(s, &k));
return k;
}
const Rendezvous::ParsedKey& KeyFoo() {
static auto* key = new Rendezvous::ParsedKey(MakeKey("foo"));
return *key;
}
const Rendezvous::ParsedKey& KeyBar() {
static auto* key = new Rendezvous::ParsedKey(MakeKey("bar"));
return *key;
}
TEST_F(LocalRendezvousTest, SendRecv) {
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
Tensor val(DT_STRING);
bool is_dead = false;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
TEST_F(LocalRendezvousTest, RecvSend) {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
TEST_F(LocalRendezvousTest, PingPong) {
SchedClosure([this]() {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &t, &is_dead));
TF_ASSERT_OK(rendez_->Send(KeyBar(), args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("secret msg"), val_dead));
TF_ASSERT_OK(rendez_->Recv(KeyBar(), args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
}
TEST_F(LocalRendezvousTest, CancelBeforeRecv) {
auto* cm = new CancellationManager();
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
cm->StartCancel();
auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::IsCancelled(s));
EXPECT_EQ("RecvAsync is cancelled.", s.message());
delete cm;
}
TEST_F(LocalRendezvousTest, CancelAfterRecv) {
auto* cm = new CancellationManager();
Notification n;
SchedClosure([cm, &n]() {
Env::Default()->SleepForMicroseconds(10000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::IsCancelled(s));
EXPECT_EQ("RecvAsync is cancelled.", s.message());
n.WaitForNotification();
delete cm;
}
TEST_F(LocalRendezvousTest, CancelEmptyQueue) {
auto* cm = new CancellationManager();
Notification n;
SchedClosure([this, cm, &n]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
n.WaitForNotification();
delete cm;
}
TEST_F(LocalRendezvousTest, CancelMultiple) {
auto* cm = new CancellationManager();
SchedClosure([this, cm]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
cm->StartCancel();
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
});
Tensor val(DT_STRING);
Rendezvous::Args args;
Rendezvous::Args args_with_cancellation;
args_with_cancellation.cancellation_manager = cm;
Notification n0;
Notification n1;
Notification n2;
Notification n3;
Status s0;
Status s1;
Status s2;
Status s3;
rendez_->RecvAsync(
KeyFoo(), args,
[&n0, &s0](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s0.Update(s);
n0.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args_with_cancellation,
[&n1, &s1](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s1.Update(s);
n1.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args,
[&n2, &s2](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s2.Update(s);
n2.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args_with_cancellation,
[&n3, &s3](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s3.Update(s);
n3.Notify();
});
n0.WaitForNotification();
n1.WaitForNotification();
n2.WaitForNotification();
n3.WaitForNotification();
TF_ASSERT_OK(s0);
TF_ASSERT_OK(s2);
EXPECT_FALSE(s1.ok());
EXPECT_FALSE(s3.ok());
delete cm;
}
struct BlockingState {
mutex lock;
int counter = 0;
Notification done;
};
TEST_F(LocalRendezvousTest, RandomSendRecv) {
static const int N = 100;
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
BlockingState state;
state.counter = N;
for (int i = 0; i < N; ++i) {
int micros = 100 + rnd.Uniform(1000);
SchedClosure([this, i, micros]() {
Env::Default()->SleepForMicroseconds(micros);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(MakeKey(strings::StrCat(i)), args,
V(strings::StrCat(i)), false));
});
auto recv_done = [this, &state, i](const Status& status,
const Rendezvous::Args& sender_args,
const Rendezvous::Args& recver_args,
const Tensor& val, const bool val_dead) {
EXPECT_EQ(strings::StrCat(i), V(val));
bool done = false;
{
mutex_lock l(state.lock);
state.counter--;
if (state.counter == 0) {
done = true;
}
}
if (done) {
state.done.Notify();
}
};
micros = 100 + rnd.Uniform(1000);
SchedClosure([this, i, micros, recv_done]() {
Env::Default()->SleepForMicroseconds(micros);
rendez_->RecvAsync(MakeKey(strings::StrCat(i)), Rendezvous::Args(),
recv_done);
});
}
state.done.WaitForNotification();
}
void RandomSleep() {
if (std::rand() % 10 == 0) {
Env::Default()->SleepForMicroseconds(1000);
}
}
TEST_F(LocalRendezvousTest, MultiSends) {
static const int N = 100;
const auto& key_foo = KeyFoo();
Rendezvous::Args args;
SchedClosure([=]() {
for (int i = 0; i < N; ++i) {
TF_ASSERT_OK(rendez_->Send(key_foo, args, V(strings::StrCat(i)), false));
RandomSleep();
}
});
Tensor val;
bool val_dead;
for (int i = 0; i < N; ++i) {
TF_ASSERT_OK(rendez_->Recv(key_foo, args, &val, &val_dead));
RandomSleep();
}
}
TEST_F(LocalRendezvousTest, RecvAbort) {
rendez_->Ref();
SchedClosure([this]() {
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);
EXPECT_TRUE(absl::IsAborted(status));
}
TEST_F(LocalRendezvousTest, RecvSleepAbort) {
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(1000000);
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);
EXPECT_TRUE(absl::IsAborted(status));
}
TEST_F(LocalRendezvousTest, AbortThenRecvOrSend) {
rendez_->StartAbort(errors::Aborted(""));
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
EXPECT_TRUE(absl::IsAborted(rendez_->Send(KeyFoo(), args, val, val_dead)));
EXPECT_TRUE(absl::IsAborted(rendez_->Recv(KeyFoo(), args, &val, &val_dead)));
}
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
Tensor* output_tensor,
StatusCallback done) const override {
done(absl::OkStatus());
}
private:
const int stream_id_;
};
TEST_F(LocalRendezvousTest, TransferDummyDeviceContext) {
Rendezvous::Args args;
args.device_context = new DummyDeviceContext(123);
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
Notification n;
Rendezvous::Args args1;
args1.device_context = new DummyDeviceContext(1);
rendez_->RecvAsync(
KeyFoo(), args1,
[&n](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& val, bool is_dead) {
CHECK_EQ(123, dynamic_cast<const DummyDeviceContext*>(
send_args.device_context)
->stream_id());
n.Notify();
});
n.WaitForNotification();
args.device_context->Unref();
args1.device_context->Unref();
}
void BM_SendRecv(::testing::benchmark::State& state) {
Rendezvous* rendez = NewLocalRendezvous();
Tensor orig = V("val");
Tensor val(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (auto s : state) {
TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));
TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &val, &is_dead));
}
CHECK_EQ(V(val), V(orig));
rendez->Unref();
}
BENCHMARK(BM_SendRecv);
void BM_RecvSend(::testing::benchmark::State& state) {
Rendezvous* rendez = NewLocalRendezvous();
Tensor orig = V("val");
Tensor val(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (auto s : state) {
bool received = false;
rendez->RecvAsync(
KeyFoo(), args,
        [&val, &received](const Status& /*status*/,
                          const Rendezvous::Args& /*send_args*/,
                          const Rendezvous::Args& /*recv_args*/,
                          const Tensor& tensor, bool /*is_dead*/) {
val = tensor;
received = true;
});
TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));
CHECK(received);
}
CHECK_EQ(V(val), V(orig));
rendez->Unref();
}
BENCHMARK(BM_RecvSend);
void BM_PingPong(::testing::benchmark::State& state) {
const int messages_count = state.range(0);
auto* cm = new CancellationManager();
thread::ThreadPool* pool = new thread::ThreadPool(Env::Default(), "test", 1);
for (auto s : state) {
Rendezvous* rendez = NewLocalRendezvous();
pool->Schedule([rendez, messages_count]() {
Tensor bar = V("bar");
Tensor foo(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (int i = 0; i < messages_count; ++i) {
TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &foo, &is_dead));
TF_CHECK_OK(rendez->Send(KeyBar(), args, bar, is_dead));
}
CHECK_EQ("foo", V(foo));
});
Tensor foo = V("foo");
Tensor bar(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
for (int i = 0; i < messages_count; ++i) {
TF_CHECK_OK(rendez->Send(KeyFoo(), args, foo, is_dead));
TF_CHECK_OK(rendez->Recv(KeyBar(), args, &bar, &is_dead));
}
CHECK_EQ("bar", V(bar));
rendez->Unref();
}
state.SetItemsProcessed(messages_count * state.iterations());
delete pool;
delete cm;
}
BENCHMARK(BM_PingPong)->Arg(100)->Arg(200)->Arg(300);
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecfc7b9e-9c2a-42a9-b642-6e26f02538cb | cpp | tensorflow/tensorflow | operand_upcaster | third_party/xla/xla/service/operand_upcaster.cc | third_party/xla/xla/service/operand_upcaster_test.cc | #include "xla/service/operand_upcaster.h"
#include <optional>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
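// Infers the shape a dot or convolution would produce with an unconstrained
// (preferred) element type; returns nullopt for every other opcode.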
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kDot:
return ShapeInference::InferDotOpShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->dot_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt,
Cast<HloDotInstruction>(instruction)->sparsity());
case HloOpcode::kConvolution:
return ShapeInference::InferConvolveShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->feature_group_count(), instruction->batch_group_count(),
instruction->window(), instruction->convolution_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt);
default:
return std::optional<Shape>(std::nullopt);
}
}
}  // namespace
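// A dot/convolution matches when both operand precisions are PACKED_NIBBLE,
// or when some operand/result element type differs from the inferred type and
// the inferred type can be safely upcast to the result type.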
bool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) {
auto status_or_inferred_shape = MaybeInferShape(instruction);
if (!status_or_inferred_shape.ok() ||
!status_or_inferred_shape->has_value()) {
return false;
}
if (absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2) {
return true;
}
PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type();
if (instruction->shape().element_type() == inferred_type &&
instruction->operand(0)->shape().element_type() == inferred_type &&
instruction->operand(1)->shape().element_type() == inferred_type) {
return false;
}
return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape,
instruction->shape());
}
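// For PACKED_NIBBLE inputs, each operand byte carries two 4-bit values: the
// low nibble is sign/zero-extended with a shift-left/shift-right pair, the
// high nibble with a single right shift, and the two partial dots are added.
// Otherwise the narrower operands are simply converted to the result type.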
absl::StatusOr<HloInstruction*> OperandUpcaster::ExpandInstruction(
HloInstruction* instruction) {
const bool packed_nibble =
absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2;
auto type = instruction->shape().element_type();
if (packed_nibble) {
HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0,
*rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0;
TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0,
MakeScalarLike(lhs_n0, 4)));
HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4)));
lhs_n0 = MakeConvertToHlo(lhs_n0, type);
TF_ASSIGN_OR_RETURN(
lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4)));
lhs_n1 = MakeConvertToHlo(lhs_n1, type);
TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0,
MakeScalarLike(rhs_n0, 4)));
HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4)));
rhs_n0 = MakeConvertToHlo(rhs_n0, type);
TF_ASSIGN_OR_RETURN(
rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4)));
rhs_n1 = MakeConvertToHlo(rhs_n1, type);
HloInstruction* linear_n0 =
instruction->parent()->AddInstruction(instruction->CloneWithNewOperands(
instruction->shape(), {lhs_n0, rhs_n0}));
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
0, PrecisionConfig::DEFAULT);
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
1, PrecisionConfig::DEFAULT);
HloInstruction* linear_n1 =
instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands(
instruction->shape(), {lhs_n1, rhs_n1}));
return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1);
}
for (int i = 0; i < HloDotInstruction::kOperands; ++i) {
auto* operand = instruction->mutable_operand(i);
if (operand->shape().element_type() == type) {
continue;
}
auto upcast_shape = operand->shape();
upcast_shape.set_element_type(type);
auto* convert_inst = instruction->AddInstruction(
HloInstruction::CreateConvert(upcast_shape, operand));
TF_RETURN_IF_ERROR(
instruction->ReplaceOperandWithDifferentShape(i, convert_inst));
}
return nullptr;
}
}  // namespace xla | #include "xla/service/operand_upcaster.h"
#include <memory>
#include <tuple>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class OperandUpcasterTest
: public HloTestBase,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};
bool ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) {
return operand_type != result_type &&
primitive_util::HigherPrecisionType(operand_type, result_type) ==
result_type;
}
TEST_P(OperandUpcasterTest, ConvertInserted) {
PrimitiveType lhs_type, rhs_type, result_type;
std::tie(lhs_type, rhs_type, result_type) = GetParam();
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) ||
ShouldUpcast(rhs_type, result_type));
auto original_lhs = op::Parameter(0);
auto original_rhs = op::Parameter(1);
auto upcasted_lhs =
ShouldUpcast(lhs_type, result_type)
? AllOf(op::Convert(original_lhs),
op::Shape(absl::Substitute(
"$0[2,3]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_lhs;
auto upcasted_rhs =
ShouldUpcast(rhs_type, result_type)
? AllOf(op::Convert(original_rhs),
op::Shape(absl::Substitute(
"$0[3,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_rhs;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(upcasted_lhs, upcasted_rhs),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
}
INSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest,
::testing::Values(std::make_tuple(S8, S8, S16),
std::make_tuple(U8, U8, U16)));
INSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest,
::testing::Combine(::testing::Values(S8, U8, S16),
::testing::Values(S8, U8, S16),
::testing::Values(S32)));
INSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest,
::testing::Combine(::testing::Values(U8, U16),
::testing::Values(U8, U16),
::testing::Values(U32)));
INSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, S8, U8),
::testing::Values(BF16, S8, U8),
::testing::Values(BF16)));
INSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, F16),
::testing::Values(BF16, F16),
::testing::Values(F32)));
INSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest,
::testing::Values(std::make_tuple(F32, F32, BF16),
std::make_tuple(S32, S32, U32)));
TEST_F(OperandUpcasterTest, SparseDot) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
p0 = bf16[2,16]{1,0} parameter(0)
p1 = bf16[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
EXPECT_TRUE(upcasted);
auto upcasted_lhs =
AllOf(op::Convert(op::Parameter(0)), op::Shape("f32[2,16]{1,0}"));
auto upcasted_rhs =
AllOf(op::Convert(op::Parameter(1)), op::Shape("f32[32,2]{1,0}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kDot,
{upcasted_lhs, upcasted_rhs, op::Parameter(2)})),
op::Shape("f32[2,2]{1,0}")));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b969274a-0550-41b7-9010-91181907524b | cpp | tensorflow/tensorflow | dot_merger | third_party/xla/xla/service/dot_merger.cc | third_party/xla/xla/service/dot_merger_test.cc | #include "xla/service/dot_merger.h"
#include <cstdint>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/protobuf_util.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
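// Tries to merge two dots that share an operand, e.g. (sketch):
//
//   d0 = dot(a, x)                    c   = concatenate(x, y)
//   d1 = dot(a, y)          =>        d   = dot(a, c)
//                                     d0' = slice(d), d1' = slice(d)
//
// Returns the merged dot, nullptr if the pair is not mergeable, or an error.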
absl::StatusOr<HloInstruction*> TryMergeSameOperand(HloInstruction* a,
HloInstruction* b) {
if (a->shape().layout() != b->shape().layout()) {
VLOG(3) << "Can't merge dots because they have a different layout:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
if (a->operand(0) != b->operand(0) && a->operand(1) != b->operand(1)) {
VLOG(4) << "Can't merge dots because they don't share an operand.\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
if (a->operand(0)->shape().element_type() !=
b->operand(0)->shape().element_type() ||
a->operand(1)->shape().element_type() !=
b->operand(1)->shape().element_type() ||
a->shape().element_type() != b->shape().element_type()) {
VLOG(3)
<< "Can't merge dots because their lhs/rhs/return-types don't match.\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
const DotDimensionNumbers& dnums_a = a->dot_dimension_numbers();
const DotDimensionNumbers& dnums_b = b->dot_dimension_numbers();
if (!absl::c_equal(dnums_a.lhs_batch_dimensions(),
dnums_b.lhs_batch_dimensions()) ||
!absl::c_equal(dnums_a.rhs_batch_dimensions(),
dnums_b.rhs_batch_dimensions()) ||
!absl::c_equal(dnums_a.lhs_contracting_dimensions(),
dnums_b.lhs_contracting_dimensions()) ||
!absl::c_equal(dnums_a.rhs_contracting_dimensions(),
dnums_b.rhs_contracting_dimensions())) {
VLOG(3) << "Can't merge dots because they have mismatching dnums.\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString() << "\n"
<< absl::c_equal(dnums_a.lhs_batch_dimensions(),
dnums_b.lhs_batch_dimensions())
<< ", "
<< absl::c_equal(dnums_a.rhs_batch_dimensions(),
dnums_b.rhs_batch_dimensions())
<< ", "
<< absl::c_equal(dnums_a.lhs_contracting_dimensions(),
dnums_b.lhs_contracting_dimensions())
<< ", "
<< absl::c_equal(dnums_a.rhs_contracting_dimensions(),
dnums_b.rhs_contracting_dimensions());
return nullptr;
}
if (!absl::c_equal(a->precision_config().operand_precision(),
b->precision_config().operand_precision())) {
VLOG(3) << "Can't merge dots because they have mismatching operand "
"precisions:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
HloDotInstruction* dot_a = Cast<HloDotInstruction>(a);
HloDotInstruction* dot_b = Cast<HloDotInstruction>(b);
if (!absl::c_equal(dot_a->sparsity(), dot_b->sparsity(),
protobuf_util::ProtobufEquals)) {
VLOG(3) << "Can't merge dots because they have mismatching sparsity "
"descriptors:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
VLOG(2) << "Merging dots sharing an operand:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
const DotDimensionNumbers& dnums = a->dot_dimension_numbers();
bool lhs_same = a->operand(0) == b->operand(0);
HloInstruction* shared_op = a->mutable_operand(lhs_same ? 0 : 1);
HloInstruction* diff_op_a = a->mutable_operand(lhs_same ? 1 : 0);
HloInstruction* diff_op_b = b->mutable_operand(lhs_same ? 1 : 0);
if (diff_op_a->shape().layout() != diff_op_b->shape().layout()) {
VLOG(3) << "Can't merge dots because the different operands have a "
"different layout:\n"
<< "\t" << diff_op_a->ToString() << "\n"
<< "\t" << diff_op_b->ToString();
return nullptr;
}
CHECK_EQ(dnums.lhs_batch_dimensions_size(),
dnums.rhs_batch_dimensions_size());
std::set<int64_t> used_dims;
int64_t shared_op_num_non_contracting_dims =
shared_op->shape().rank() - dnums.lhs_batch_dimensions_size();
if (lhs_same) {
shared_op_num_non_contracting_dims -=
dnums.lhs_contracting_dimensions_size();
used_dims.insert(dnums.rhs_contracting_dimensions().begin(),
dnums.rhs_contracting_dimensions().end());
used_dims.insert(dnums.rhs_batch_dimensions().begin(),
dnums.rhs_batch_dimensions().end());
} else {
shared_op_num_non_contracting_dims -=
dnums.rhs_contracting_dimensions_size();
used_dims.insert(dnums.lhs_contracting_dimensions().begin(),
dnums.lhs_contracting_dimensions().end());
used_dims.insert(dnums.lhs_batch_dimensions().begin(),
dnums.lhs_batch_dimensions().end());
}
if (used_dims.size() + 1 != diff_op_a->shape().rank()) {
VLOG(3)
<< "Can't merge dots because the different operands don't have exactly "
"one non-contracting dimension:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
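  // The outer (concatenation) dimension is the one dimension of the differing
  // operands that is neither a batch nor a contracting dimension.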
int64_t outer_dim = 0;
for (auto used_dim : used_dims) {
if (used_dim != outer_dim) {
break;
}
++outer_dim;
}
std::vector<SparsityDescriptor> sparsity(dot_a->sparsity().begin(),
dot_a->sparsity().end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
HloInstruction* meta = a->mutable_operand(HloDotInstruction::kOperands + i);
HloInstruction* other_meta =
b->mutable_operand(HloDotInstruction::kOperands + i);
if (sparsity[i].index() == (lhs_same ? 1 : 0)) {
TF_ASSIGN_OR_RETURN(
Shape meta_concat_shape,
ShapeInference::InferConcatOpShape(
{&meta->shape(), &other_meta->shape()}, outer_dim));
meta = meta->AddInstruction(HloInstruction::CreateConcatenate(
meta_concat_shape, {meta, other_meta}, outer_dim));
} else {
if (other_meta != meta) {
VLOG(3)
<< "Can't merge dots because the sparsity metadata is different:\n"
<< "\t" << a->ToString() << "\n"
<< "\t" << b->ToString();
return nullptr;
}
}
sparse_meta[i] = meta;
}
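  // Concatenate the differing operands (keeping their layout) and build the
  // merged dot with dot `a`'s result layout.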
TF_ASSIGN_OR_RETURN(
Shape concat_shape,
ShapeInference::InferConcatOpShape(
{&diff_op_a->shape(), &diff_op_b->shape()}, outer_dim));
*concat_shape.mutable_layout() = diff_op_a->shape().layout();
HloInstruction* concat_op =
diff_op_a->AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, {diff_op_a, diff_op_b}, outer_dim));
HloInstruction* dot_lhs = lhs_same ? shared_op : concat_op;
HloInstruction* dot_rhs = lhs_same ? concat_op : shared_op;
TF_ASSIGN_OR_RETURN(
Shape new_dot_shape,
ShapeInference::InferDotOpShape(
dot_lhs->shape(), dot_rhs->shape(), dnums,
a->shape().element_type(), sparsity));
*new_dot_shape.mutable_layout() = a->shape().layout();
HloInstruction* new_dot = a->AddInstruction(
HloInstruction::CreateDot(new_dot_shape, dot_lhs, dot_rhs, dnums,
a->precision_config(), sparsity, sparse_meta));
if (!a->metadata().op_name().empty()) {
new_dot->set_metadata(a->metadata());
} else if (!b->metadata().op_name().empty()) {
new_dot->set_metadata(b->metadata());
}
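  // Slice the merged result back into the shapes of the two original dots and
  // rewire all of their uses to the slices.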
DimensionVector start_indices(new_dot_shape.dimensions_size(), 0);
DimensionVector limit_indices(new_dot_shape.dimensions().begin(),
new_dot_shape.dimensions().end());
DimensionVector strides(new_dot_shape.dimensions_size(), 1);
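  // The dimension to slice along is the concatenated operand's
  // non-contracting dimension in the dot output: the last dimension when the
  // RHS was concatenated, otherwise the dimension just before the shared
  // RHS's non-contracting dimensions.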
int64_t slice_dim = new_dot_shape.dimensions_size() -
(lhs_same ? 1 : 1 + shared_op_num_non_contracting_dims);
limit_indices[slice_dim] = a->shape().dimensions(slice_dim);
HloInstruction* new_a = a->AddInstruction(HloInstruction::CreateSlice(
a->shape(), new_dot, start_indices, limit_indices, strides));
TF_RETURN_IF_ERROR(a->ReplaceAllUsesWith(new_a));
start_indices[slice_dim] = limit_indices[slice_dim];
limit_indices[slice_dim] = new_dot_shape.dimensions(slice_dim);
HloInstruction* new_b = b->AddInstruction(HloInstruction::CreateSlice(
b->shape(), new_dot, start_indices, limit_indices, strides));
TF_RETURN_IF_ERROR(b->ReplaceAllUsesWith(new_b));
return new_dot;
}
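// Greedily merges compatible dots in `comp`. Dots are grouped into
// equivalence classes keyed by a shared operand; pairs within a class are
// merged while at least one of the pair is small enough (`is_merge_candidate`)
// and merging cannot introduce a cycle.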
absl::StatusOr<bool> MergeDots(HloComputation* comp,
int64_t max_size_to_merge) {
auto is_merge_candidate = [&](HloInstruction* instr) {
int64_t bytes = ShapeUtil::ByteSizeOfElements(instr->shape());
for (const HloInstruction* operand : instr->operands()) {
bytes += ShapeUtil::ByteSizeOfElements(operand->shape());
}
return bytes <= max_size_to_merge;
};
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
equivalence_classes;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kDot ||
!instr->control_predecessors().empty() ||
!instr->control_successors().empty()) {
continue;
}
for (HloInstruction* operand : instr->operands()) {
equivalence_classes[operand].insert(instr);
}
}
absl::erase_if(
equivalence_classes,
[&](const std::pair<const HloInstruction*,
absl::flat_hash_set<HloInstruction*>>& kv) {
const auto& v = kv.second;
return v.size() < 2 || absl::c_none_of(v, is_merge_candidate);
});
if (equivalence_classes.empty()) {
return false;
}
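  // Build a dependency graph (data and control edges) over the computation so
  // we can cheaply test reachability between dots; merging two mutually
  // unreachable dots can never create a cycle.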
GraphCycles graph;
absl::flat_hash_map<HloInstruction*, int32_t> graph_ids_map;
auto graph_id = [&](HloInstruction* instr) {
auto it_and_inserted = graph_ids_map.emplace(instr, -1);
auto it = it_and_inserted.first;
auto inserted = it_and_inserted.second;
if (inserted) {
it->second = graph.NewNode();
}
return it->second;
};
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
int32_t id = graph_id(instr);
for (HloInstruction* operand : instr->operands()) {
CHECK(graph.InsertEdge(graph_id(operand), id));
}
for (HloInstruction* control_pred : instr->control_predecessors()) {
CHECK(graph.InsertEdge(graph_id(control_pred), id));
}
}
absl::flat_hash_set<HloInstruction*> dead_instrs;
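  // Visit equivalence classes in a deterministic order (by unique id).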
std::vector<HloInstruction*> keys;
keys.reserve(equivalence_classes.size());
for (auto& kv : equivalence_classes) {
keys.push_back(kv.first);
}
absl::c_sort(keys, [](const HloInstruction* a, const HloInstruction* b) {
return a->unique_id() < b->unique_id();
});
for (auto key : keys) {
const auto& values = equivalence_classes[key];
absl::InlinedVector<HloInstruction*, 16> dots(values.begin(), values.end());
absl::c_sort(dots, [](const HloInstruction* a, const HloInstruction* b) {
return a->unique_id() < b->unique_id();
});
for (int64_t i = 0; i < dots.size(); i++) {
HloInstruction*& a = dots[i];
if (a == nullptr) {
continue;
}
for (int64_t j = i + 1; j < dots.size(); j++) {
HloInstruction* b = dots[j];
if (b == nullptr) {
continue;
}
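        // Skip pairs where either dot is already dead, neither is a merge
        // candidate, or one dot can reach the other (merging them would
        // create a cycle).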
int32_t a_id = graph_id(a);
int32_t b_id = graph_id(b);
if (dead_instrs.contains(a) || dead_instrs.contains(b) ||
(!is_merge_candidate(a) && !is_merge_candidate(b)) ||
graph.IsReachableNonConst(a_id, b_id) ||
graph.IsReachableNonConst(b_id, a_id)) {
continue;
}
TF_ASSIGN_OR_RETURN(HloInstruction * merged, TryMergeSameOperand(a, b));
if (merged != nullptr) {
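          // Keep the reachability graph conservative: anything that reached
          // `a` or `b` now reaches the merged dot, and the merged dot reaches
          // everything `a` or `b` reached.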
int32_t merged_id = graph_id(merged);
graph.InsertEdge(a_id, merged_id);
graph.InsertEdge(b_id, merged_id);
for (int32_t succ : graph.SuccessorsCopy(a_id)) {
graph.InsertEdge(merged_id, succ);
}
for (int32_t succ : graph.SuccessorsCopy(b_id)) {
graph.InsertEdge(merged_id, succ);
}
dead_instrs.insert(a);
dead_instrs.insert(b);
dots[i] = merged;
dots[j] = nullptr;
}
}
}
}
for (HloInstruction* instr : dead_instrs) {
TF_RETURN_IF_ERROR(comp->RemoveInstruction(instr));
}
return !dead_instrs.empty();
}
}  // namespace
absl::StatusOr<bool> DotMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool changed_computation,
MergeDots(comp, max_size_to_merge_));
changed |= changed_computation;
}
return changed;
}
} | #include "xla/service/dot_merger.h"
#include <cstdint>
#include <limits>
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class DotMergerTest : public HloTestBase {
public:
  DotMergerTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
};
TEST_F(DotMergerTest, MergeRHS) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[200,100] parameter(0)
rhs0 = f32[100, 10] parameter(1)
rhs1 = f32[100, 50] parameter(2)
dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* dot0 = nullptr;
const HloInstruction* dot1 = nullptr;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));
EXPECT_EQ(dot0, dot1);
EXPECT_THAT(dot0,
GmockMatch(m::Dot(m::Parameter(0),
m::Concatenate().WithBinaryOperandsAnyOrder(
m::Parameter(1), m::Parameter(2)))));
}
TEST_F(DotMergerTest, MergeRHSWithLayouts) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[200,100] parameter(0)
rhs0 = f32[100, 10]{0,1} parameter(1)
rhs1 = f32[100, 50]{0,1} parameter(2)
dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* dot0 = nullptr;
const HloInstruction* dot1 = nullptr;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));
EXPECT_EQ(dot0, dot1);
Shape expected_concat_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 60}, {0, 1});
EXPECT_THAT(
dot0, GmockMatch(m::Dot(m::Parameter(0),
m::Concatenate()
.WithBinaryOperandsAnyOrder(m::Parameter(1),
m::Parameter(2))
.WithShapeEqualTo(&expected_concat_shape))));
}
TEST_F(DotMergerTest, NoMergeDifferentLayoutRHS) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[200,100] parameter(0)
rhs0 = f32[100, 10]{0,1} parameter(1)
rhs1 = f32[100, 50]{1,0} parameter(2)
dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, MergeLHS) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
rhs = f32[200, 50] parameter(2)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
TEST_F(DotMergerTest, MergeLHSDotsWithNonDefaultLayout) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
rhs = f32[200, 50] parameter(2)
dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50]{0,1} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{0,1}) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
Shape expected_dot_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {400, 50}, {0, 1});
const HloInstruction* dot0 = nullptr;
const HloInstruction* dot1 = nullptr;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::Dot(&dot0, m::Op(), m::Op())
.WithShapeEqualTo(&expected_dot_shape)),
m::Slice(m::Dot(&dot1, m::Op(), m::Op())))));
EXPECT_EQ(dot0, dot1);
}
TEST_F(DotMergerTest, NoMergeDifferentLayoutLHS) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200]{1,0} parameter(0)
lhs1 = f32[300,200]{0,1} parameter(1)
rhs = f32[200, 50] parameter(2)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentDotLayout) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
rhs = f32[200, 50] parameter(2)
dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50]{1,0} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{1,0}) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, MergeThree) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
lhs2 = f32[500,200] parameter(2)
rhs = f32[200, 50] parameter(3)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};
TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
const HloInstruction* s2 = nullptr;
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(
&s0,
m::Concatenate(m::Parameter(0), m::Parameter(1), m::Parameter(2)),
m::Parameter(3))),
m::Slice(m::Op(&s1)), m::Slice(m::Op(&s2)))));
EXPECT_EQ(s0, s1);
EXPECT_EQ(s1, s2);
}
TEST_F(DotMergerTest, NoMergeThreeDueToCycle) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
rhs = f32[200, 50] parameter(2)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
zero = f32[] constant(0)
lhs2 = f32[500,200] pad(dot0, zero), padding=400_0x150_0
dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};
TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
const HloInstruction* s2 = nullptr;
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Slice(m::Op(&s1)),
m::Dot(&s2, m::Op(), m::Parameter(2)))));
EXPECT_EQ(s0, s1);
EXPECT_NE(s0, s2);
}
TEST_F(DotMergerTest, NoMergeDataDependency) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
rhs = f32[200, 50] parameter(1)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
zero = f32[] constant(0)
lhs1 = f32[300,200] pad(dot0, zero), padding=200_0x150_0
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, MergeSameContractingDimsOnBothSides) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
rhs = f32[50, 200] parameter(2)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
TEST_F(DotMergerTest, MergeWithBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[2,4,100,200] parameter(0)
lhs1 = f32[2,4,300,200] parameter(1)
rhs = f32[2,4,200, 50] parameter(2)
dot0 = f32[2,4,100, 50] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
dot1 = f32[2,4,300, 50] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
ROOT tuple = (f32[2,4,100,50], f32[2,4,300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
TEST_F(DotMergerTest, MergeWithBatchDimsAndMultipleContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[2,3,4,5] parameter(0)
rhs0 = f32[2,6,3,4,5] parameter(1)
rhs1 = f32[2,7,3,4,5] parameter(2)
dot0 = f32[2,4,6] dot(lhs, rhs0), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},
lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}
dot1 = f32[2,4,7] dot(lhs, rhs1), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},
lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}
ROOT tuple = (f32[2,4,6], f32[2,4,7]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
TEST_F(DotMergerTest, MergeWithUnsortedBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[2,4,100,200] parameter(0)
lhs1 = f32[2,4,300,200] parameter(1)
rhs = f32[2,4,200, 50] parameter(2)
dot0 = f32[4,2,100, 50] dot(lhs0, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
dot1 = f32[4,2,300, 50] dot(lhs1, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
ROOT tuple = (f32[4,2,100,50], f32[4,2,300,50]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
TEST_F(DotMergerTest, NoMergeDueToIsMergeCandidate) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
lhs2 = f32[500,200] parameter(2)
rhs = f32[200, 50] parameter(3)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(/*max_size_to_merge=*/(100 * 50 + 100 * 200 + 200 * 50) *
                 sizeof(float));
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
const HloInstruction* s2 = nullptr;
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(3))),
m::Slice(m::Op(&s1)),
m::Dot(&s2, m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(s0, s1);
EXPECT_NE(s0, s2);
}
TEST_F(DotMergerTest, NoMergeDifferentLhsBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10,10] parameter(0)
lhs1 = f32[10,10,10,10] parameter(1)
rhs = f32[10,10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,2}, rhs_batch_dims={0,1}, lhs_contracting_dims={1}, rhs_contracting_dims={2}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentRhsBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10,10] parameter(0)
lhs1 = f32[10,10,10,10] parameter(1)
rhs = f32[10,10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,2}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, MergeMultipleContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10] parameter(0)
lhs1 = f32[10,10,10] parameter(1)
rhs = f32[10,10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Slice(m::Op(&s1)))));
EXPECT_EQ(s0, s1);
}
TEST_F(DotMergerTest, MergeMultipleNonContractingDimsInRhsSharedOperand) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[8,9,10] parameter(0)
lhs1 = f32[8,9,11] parameter(1)
rhs = f32[8,9,12,13] parameter(2)
dot0 = f32[10,12,13] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
dot1 = f32[11,12,13] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT tuple = (f32[10,12,13], f32[11,12,13]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK(verifier().Run(module.get()).status());
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Slice(m::Op(&s1)))));
EXPECT_EQ(s0, s1);
}
TEST_F(DotMergerTest, NoMergeMultipleOuterDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10] parameter(0)
lhs1 = f32[10,10,10] parameter(1)
rhs = f32[10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentLhsContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentRhsContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={1}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeControlPredecessor) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot2 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, control-predecessors={dot1}
ROOT tuple = (f32[10,10], f32[10,10], f32[10,10]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentLhsTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentRhsTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[10,10] parameter(0)
rhs0 = f32[10,10] parameter(1)
rhs1 = f16[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs, rhs0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs, rhs1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, NoMergeDifferentReturnTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f16[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f16[10,10] parameter(2)
dot0 = f16[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f16[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(DotMergerTest, MergeWithTypeUpgrade) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f16[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f16[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* d0 = nullptr;
const HloInstruction* d1 = nullptr;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&d0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))
.WithShape(F32, {20, 10})),
m::Slice(m::Op(&d1)))));
EXPECT_EQ(d0, d1);
}
TEST_F(DotMergerTest, MergeSparseDotsSameMetadata) {
absl::string_view kHlo = R"(
HloModule test
ENTRY main {
lhs0 = f16[5,10,32] parameter(0)
lhs1 = f16[5,10,32] parameter(1)
rhs = f16[5,10,16] parameter(2)
meta = u16[5,10,2] parameter(3)
dot0 = f32[5,10,10] dot(lhs0, rhs, meta), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[5,10,10] dot(lhs1, rhs, meta), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction *d0, *d1;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Op(&d0)
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, m::Concatenate(m::Parameter(0),
m::Parameter(1)))
.WithOperand(1, m::Parameter(2))
.WithOperand(2, m::Parameter(3))
.WithShape(F32, {5, 20, 10})),
m::Slice(m::Op(&d1)))));
EXPECT_EQ(d0, d1);
EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 10, 2}));
}
TEST_F(DotMergerTest, MergeSparseDotsConcatMetadata) {
absl::string_view kHlo = R"(
HloModule test
ENTRY main {
lhs0 = f16[5,10,16] parameter(0)
lhs1 = f16[5,10,16] parameter(1)
rhs = f16[5,10,32] parameter(2)
meta0 = u16[5,10,2] parameter(3)
meta1 = u16[5,10,2] parameter(4)
dot0 = f32[5,10,10] dot(lhs0, rhs, meta0), sparsity=L.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[5,10,10] dot(lhs1, rhs, meta1), sparsity=L.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction *d0, *d1;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Op(&d0)
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, m::Concatenate(m::Parameter(0),
m::Parameter(1)))
.WithOperand(1, m::Parameter(2))
.WithOperand(2, m::Concatenate(m::Parameter(3),
m::Parameter(4)))
.WithShape(F32, {5, 20, 10})),
m::Slice(m::Op(&d1)))));
EXPECT_EQ(d0, d1);
EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 20, 2}));
}
TEST_F(DotMergerTest, MergeSparseDotsDifferentMetadata) {
absl::string_view kHlo = R"(
HloModule test
ENTRY main {
lhs0 = f16[5,10,32] parameter(0)
lhs1 = f16[5,10,32] parameter(1)
rhs = f16[5,10,16] parameter(2)
meta1 = u16[5,10,2] parameter(3)
meta2 = u16[5,10,2] parameter(4)
dot0 = f32[5,10,10] dot(lhs0, rhs, meta1), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[5,10,10] dot(lhs1, rhs, meta2), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcbda2a7-93ed-47d7-b14f-5c2b1c80a977 | cpp | tensorflow/tensorflow | host_offloading_prepare | third_party/xla/xla/service/host_offloading_prepare.cc | third_party/xla/xla/service/host_offloading_prepare_test.cc | #include "xla/service/host_offloading_prepare.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget;
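// Returns true if `instruction` is an async-start on the host thread whose
// wrapped instruction is a call, i.e. the start of a host-offloaded
// computation.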
bool IsHostAsyncStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_execution_thread() == HloInstruction::kHostThread &&
instruction->async_wrapped_instruction()->opcode() == HloOpcode::kCall;
}
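// Elides MoveToHost custom-calls that directly feed `async_start`: the host
// call consumes the value on the host anyway, so the annotation is redundant.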
absl::StatusOr<bool> RemoveSurroundingMoveCustomCalls(
HloInstruction* async_start) {
bool removed = false;
for (HloInstruction* operand : async_start->operands()) {
if (operand->IsCustomCall(kMoveToHostCustomCallTarget)) {
CHECK_EQ(operand->operands().size(), 1);
VLOG(1) << "Replacing " << operand->ToString() << " with "
<< operand->operands().at(0)->ToString();
TF_RETURN_IF_ERROR(
operand->ReplaceAllUsesWith(operand->mutable_operand(0)));
TF_RETURN_IF_ERROR(async_start->parent()->RemoveInstruction(operand));
removed = true;
}
}
return removed;
}
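// Removes redundant MoveToHost custom-calls around every async-start that
// launches a host computation in the module.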
absl::StatusOr<bool> ElideMoveCustomCalls(HloModule* module) {
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (HloComputation* computation : module->computations()) {
if (computation->execution_thread() != HloInstruction::kHostThread) {
continue;
}
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(computation);
for (HloInstruction* caller : callers) {
VLOG(2) << "Hlo computation " << computation->name()
<< " is offloaded to host and has caller " << caller->ToString();
if (caller->parent()->execution_thread() == HloInstruction::kHostThread) {
VLOG(3) << "Nested host computation, must be a async-wrapper";
continue;
}
VLOG(2) << "Going to adjust before and after " << caller->name();
}
}
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsHostAsyncStart(instruction)) {
VLOG(2) << "Found async start of host computation: "
<< instruction->ToString() << " done must be "
<< instruction->users().at(0)->ToString();
TF_ASSIGN_OR_RETURN(bool removed,
RemoveSurroundingMoveCustomCalls(instruction));
changed = changed || removed;
}
}
}
return changed;
}
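// Rewrites the call wrapped by each host async-start into an equivalent
// "HostExecute" custom-call, carrying over the output-to-operand aliasing.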
absl::StatusOr<bool> ConvertToCustomCall(HloModule* module) {
bool changed = false;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsHostAsyncStart(instruction)) {
auto* call_start = Cast<HloAsyncInstruction>(instruction);
auto* call = call_start->async_wrapped_instruction();
auto custom_call = HloInstruction::CreateCustomCall(
call->shape(), call->operands(), call->called_computations().at(0),
"HostExecute");
custom_call->set_output_to_operand_aliasing(
call->output_operand_aliasing());
HloComputation* async_computation =
call_start->async_wrapped_computation();
async_computation->set_root_instruction(
async_computation->AddInstruction(std::move(custom_call)));
TF_RETURN_IF_ERROR(async_computation->RemoveInstruction(call));
changed = true;
}
}
}
return changed;
}
}  // namespace
absl::StatusOr<bool> HostOffloadingPrepare::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
switch (rewrite_) {
case Rewrite::kElideMoveToHost:
return ElideMoveCustomCalls(module);
case Rewrite::kConvertToCustomCall:
return ConvertToCustomCall(module);
}
}
} | #include "xla/service/host_offloading_prepare.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Rewrite = HostOffloadingPrepare::Rewrite;
class HostOffloadingPrepareTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunRewrite(HloModule* module, Rewrite rewrite) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostOffloadingPrepare pass(rewrite);
TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
return changed;
}
std::vector<const HloInstruction*> GetHostOffloadAsyncStartInstructions(
const HloModule* module) {
std::vector<const HloInstruction*> result;
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_execution_thread() ==
HloInstruction::kHostThread) {
result.push_back(instruction);
}
}
}
return result;
}
};
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host, move_to_host), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
move_to_host.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host.1, move_to_host.2), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
custom-call.cloned.call-start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device, move_to_device), async_execution_thread="host", calls=async_computation
ROOT custom-call.cloned.call-done = s32[32]{0:T(128)} async-done(custom-call.cloned.call-start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
move_to_device.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device.1, move_to_device.2), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, ConvertToCustomCall) {
const char* hlo = R"(
HloModule my_module
host_computation {
Arg_0.0 = s32[32] parameter(0)
ROOT multiply.0 = s32[32] multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32] parameter(0)
ROOT call = s32[32] call(param_0), to_apply=host_computation
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32] parameter(0)
start = ((s32[32]), s32[32], u32[]) async-start(Arg_0.1),
async_execution_thread="host", calls=async_computation
ROOT done = s32[32] async-done(start)
}
)";
  // Minimal FileCheck pattern: it only asserts the core of the rewrite,
  // namely that the async computation's root becomes a custom-call with
  // custom_call_target="HostExecute".
  const char* expected = R"(
// CHECK: async_computation
// CHECK: custom-call
// CHECK-SAME: custom_call_target="HostExecute"
)";
RunAndFilecheckHloRewrite(
hlo, HostOffloadingPrepare(Rewrite::kConvertToCustomCall), expected);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9f15842-1826-48a4-bfa0-e2fbdeeca04f | cpp | tensorflow/tensorflow | convert_async_collectives_to_sync | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc | #include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
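// Converts the given async collective pairs to synchronous form: each start
// op is tagged is_sync=true in its collective backend config, and the
// schedule is rewritten so every done immediately follows its start.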
absl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const {
absl::flat_hash_map<HloInstruction*, HloInstruction*> replaced_ops;
CollectiveBackendConfig sync_config;
sync_config.set_is_sync(true);
for (auto& [async_start, async_done] : async_pairs) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = sync_config;
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
replaced_ops[async_start] = nullptr;
replaced_ops[async_done] = async_start;
}
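  // Rebuild the schedule using replaced_ops: a start maps to nullptr (drop it
  // from its old position) and a done maps to its start (re-insert the start
  // immediately before the done).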
HloModule* module = computation->parent();
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
std::vector<HloInstruction*> new_sequence;
new_sequence.reserve(sequence.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_ops.find(instr);
if (it == replaced_ops.end()) {
new_sequence.push_back(instr);
continue;
}
if (it->second == nullptr) {
continue;
}
new_sequence.push_back(it->second);
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
return absl::OkStatus();
}
}  // namespace gpu
} | #include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::IsFalse;
using ::testing::IsTrue;
class GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {
public:
absl::Status RunPass(HloModule *module, bool expect_change,
HloPredicate is_nop = {}) {
TF_ASSIGN_OR_RETURN(bool changed,
GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));
EXPECT_EQ(changed, expect_change);
return absl::OkStatus();
}
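  // Returns true iff the named instruction is marked is_sync in its
  // collective backend config.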
bool IsSync(HloModule *module, std::string_view name) {
const HloInstruction *inst = FindInstruction(module, name);
if (inst == nullptr) {
return false;
}
auto backend_config = inst->backend_config<GpuBackendConfig>()
.value()
.collective_backend_config();
return backend_config.is_sync();
}
HloPredicate is_nop_simple_ =
HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>;
};
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
id2 = f32[] bitcast(id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
collective_broadcast {
p0 = u32[8] parameter(0)
ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}
}
ENTRY main {
data = u32[8] parameter(0)
cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast
ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "cb-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
id2 = u32[] add(id, id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/false));
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
a1 = u32[1, 2] parameter(0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
ROOT allgather = u32[2,2] all-gather-done(ags)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "ags"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
p = u32[2] parameter(0)
start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
ROOT done = u32[2] collective-permute-done(start)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
dimensions={0}, to_apply=add
}
ENTRY main {
data = u32[8] parameter(0)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "rs-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}
ENTRY test_computation {
a1 = u32[2] parameter(0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "a2a-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
done1 = u32[] all-reduce-done(start1)
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done1 = u32[] all-reduce-done(start1)
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
id2 = u32[] add(done2, done2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), /*expect_change=*/true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsFalse());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13af8cfa-a80f-4abd-b903-d4e5aa058635 | cpp | tensorflow/tensorflow | while_loop_trip_count_annotator | third_party/xla/xla/service/while_loop_trip_count_annotator.cc | third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc | #include "xla/service/while_loop_trip_count_annotator.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
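// Walks every while instruction and, when the trip count can be computed
// statically, stores it in the loop's WhileLoopBackendConfig.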
absl::StatusOr<bool> WhileLoopTripCountAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kWhile) {
continue;
}
if (auto trip_count = ComputeWhileLoopTripCount(instr)) {
WhileLoopBackendConfig config;
config.mutable_known_trip_count()->set_n(*trip_count);
TF_RETURN_IF_ERROR(instr->set_backend_config(config));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TripCountAnnotatorTest : public HloTestBase {};
TEST_F(TripCountAnnotatorTest, KnownSmallTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(10, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, KnownLargeTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(1000000, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, NonzeroStart) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999990, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, LessThanOrEqualTo) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999991, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, Int64Overflow) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
one = s64[] constant(1)
i_plus_one = s64[] add(i, one)
ROOT tuple = (s64[]) tuple(i_plus_one)
}
Cond {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
trip_count = s64[] constant(9223372036854775807)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s64[] constant(-9223372036854775808)
initial_tuple = (s64[]) tuple(i_start)
ROOT while = (s64[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
53602328-c7cf-4c6d-8ee9-b9779b3bed8b | cpp | tensorflow/tensorflow | ar_crs_combiner | third_party/xla/xla/service/ar_crs_combiner.cc | third_party/xla/xla/service/ar_crs_combiner_test.cc | #include "xla/service/ar_crs_combiner.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
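// Converts a cross-replica all-reduce whose result is provably replicated
// across partitions into a global (channel) all-reduce, then divides by the
// partition count so the value is unchanged. Only fires for non-tuple F32
// all-reduces when the replica count is at least 8x the partition count.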
absl::StatusOr<bool> ReplaceReplicatedAllReduce(HloModule* module,
int64_t partition_count) {
TF_ASSIGN_OR_RETURN(
auto replication_analysis,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/true));
bool changed = false;
int64_t next_channel = hlo_query::NextChannelId(*module);
for (auto computation : module->computations()) {
for (auto instruction : computation->instructions()) {
if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) {
const Shape& shape = ar->shape();
if (ar->channel_id()) {
continue;
}
if (ar->replica_groups().size() > 1) {
continue;
}
if (shape.IsTuple() || shape.element_type() != F32) {
continue;
}
if (module->config().replica_count() < 8 * partition_count) {
continue;
}
if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {
VLOG(2) << "Replaced replicated all-reduce:" << ar->ToString();
ar->set_channel_id(next_channel++);
auto divisor =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<float>(partition_count)));
auto bcast = computation->AddInstruction(
HloInstruction::CreateBroadcast(shape, divisor, {}));
auto div = computation->AddInstruction(HloInstruction::CreateBinary(
ar->shape(), HloOpcode::kDivide, ar, bcast));
TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));
changed = true;
}
}
}
}
return changed;
}
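// Returns true if the cross-module all-reduce's replica groups allow it to be
// combined with a cross-replica all-reduce: one group per replica, and with
// global device ids each group must contain exactly the partitions of a
// single replica.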
bool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {
auto all_reduce = Cast<HloAllReduceInstruction>(hlo);
auto replica_groups = all_reduce->replica_groups();
const int64_t replica_count = hlo->GetModule()->config().replica_count();
CHECK(all_reduce->IsCrossModuleAllReduce());
if (all_reduce->use_global_device_ids()) {
if (replica_groups.size() != replica_count) {
return false;
}
for (const auto& group : replica_groups) {
if (group.replica_ids_size() != num_partitions) {
return false;
}
absl::flat_hash_set<int64_t> partition_ids;
int64_t replica_id = group.replica_ids(0) / num_partitions;
for (int64_t i = 0; i < num_partitions; ++i) {
if (group.replica_ids(i) / num_partitions != replica_id) {
return false;
}
partition_ids.insert(group.replica_ids(i) % num_partitions);
}
if (partition_ids.size() != num_partitions) {
return false;
}
}
return true;
}
return replica_groups.size() == replica_count;
}
}
namespace m = match;
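// Tries to match the pattern AR -> (single-user elementwise/shape ops) ->
// cross-replica all-reduce, where both reductions are additions. On success,
// returns the AR/CRS pair together with their distance.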
std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern(
HloInstruction* instruction) {
auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {
if (instruction->user_count() != 1) {
return false;
}
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
return true;
case HloOpcode::kConvert:
return ShapeUtil::ElementIsFloating(instruction->shape()) ==
ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());
case HloOpcode::kAdd:
case HloOpcode::kSubtract:
case HloOpcode::kMultiply:
return ShapeUtil::ElementIsFloating(instruction->shape());
default:
return false;
}
};
auto computation_is_addition = [](HloComputation* c) {
return c->instruction_count() == 3 &&
Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));
};
if (instruction->IsCrossModuleAllReduce() &&
HasCombinableReplicaGroup(instruction, num_spatial_partitions_) &&
computation_is_addition(instruction->called_computations()[0]) &&
instruction->user_count() == 1) {
auto next = instruction->users()[0];
int64_t distance = 1;
while (!next->IsCrossReplicaAllReduce()) {
if (can_ar_move_past_instruction(next)) {
next = next->users()[0];
} else {
return std::nullopt;
}
++distance;
}
if (!Cast<HloAllReduceInstruction>(next)->IsNoop() &&
computation_is_addition(next->called_computations()[0])) {
ArCrsPair pair(instruction, next, distance);
VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString();
return pair;
}
}
return std::nullopt;
}
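// If `instruction` is a parameter of a computation whose single caller is a
// while instruction, returns that while; otherwise returns nullopt.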
std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter(
HloInstruction* instruction) {
CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
HloComputation* computation = instruction->parent();
auto caller_instructions = call_graph_->GetComputationCallers(computation);
if (caller_instructions.size() == 1) {
auto caller_instruction = caller_instructions[0];
if (caller_instruction->opcode() == HloOpcode::kWhile) {
return caller_instruction;
}
}
return std::nullopt;
}
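// If `instruction` is a parameter of a computation whose single caller is a
// conditional instruction, returns that conditional; otherwise returns
// nullopt.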
std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter(
HloInstruction* instruction) {
CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
HloComputation* computation = instruction->parent();
auto caller_instructions = call_graph_->GetComputationCallers(computation);
if (caller_instructions.size() == 1) {
auto caller_instruction = caller_instructions[0];
if (caller_instruction->opcode() == HloOpcode::kConditional) {
return caller_instruction;
}
}
return std::nullopt;
}
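// Collects every tuple instruction that may flow into `instruction`, looking
// through domains, get-tuple-elements, whiles, and conditionals. Returns
// nullopt if the full set of source tuples cannot be determined.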
std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples(
HloInstruction* instruction,
absl::flat_hash_set<HloInstruction*>* visited) {
if (visited->find(instruction) != visited->end()) {
return std::vector<HloInstruction*>();
}
visited->insert(instruction);
switch (instruction->opcode()) {
case HloOpcode::kTuple: {
return std::vector<HloInstruction*>({instruction});
}
case HloOpcode::kDomain: {
return GetAllTuples(instruction->operands()[0], visited);
}
case HloOpcode::kParameter: {
auto maybe_while = WhileFromBodyParameter(instruction);
if (maybe_while) {
auto while_instr = *maybe_while;
auto init_tuples = GetAllTuples(while_instr->while_init(), visited);
auto body_tuples = GetAllTuples(
while_instr->while_body()->root_instruction(), visited);
if (!init_tuples || !body_tuples) {
return std::nullopt;
}
auto result = *init_tuples;
result.insert(result.end(), body_tuples->begin(), body_tuples->end());
return result;
}
auto maybe_conditional = ConditionalFromBodyParameter(instruction);
if (maybe_conditional) {
auto cond_instr = *maybe_conditional;
std::vector<HloInstruction*> tuples;
for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {
if (cond_instr->branch_computation(i)->parameter_instruction(0) ==
instruction) {
auto branch_tuples =
GetAllTuples(cond_instr->mutable_operand(i + 1), visited);
if (!branch_tuples) {
return std::nullopt;
}
tuples.insert(tuples.end(), branch_tuples->begin(),
branch_tuples->end());
}
}
return tuples;
}
return std::nullopt;
}
case HloOpcode::kGetTupleElement: {
std::vector<HloInstruction*> result_tuples;
auto tuples = GetAllTuples(instruction->operands()[0], visited);
if (!tuples) {
return std::nullopt;
}
for (auto tuple : *tuples) {
auto tmp_tuples = GetAllTuples(
tuple->mutable_operand(instruction->tuple_index()), visited);
if (!tmp_tuples) {
return std::nullopt;
}
result_tuples.insert(result_tuples.end(), tmp_tuples->begin(),
tmp_tuples->end());
}
return result_tuples;
}
case HloOpcode::kConditional: {
std::vector<HloInstruction*> result_tuples;
const auto& branch_computations = instruction->branch_computations();
result_tuples.reserve(branch_computations.size());
for (HloComputation* body : branch_computations) {
if (body->root_instruction()->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
result_tuples.push_back(body->root_instruction());
}
return result_tuples;
}
case HloOpcode::kWhile: {
auto init_tuples = GetAllTuples(instruction->while_init(), visited);
auto body_tuples =
GetAllTuples(instruction->while_body()->root_instruction(), visited);
if (!init_tuples || !body_tuples) {
return std::nullopt;
}
auto result = *init_tuples;
result.insert(result.end(), body_tuples->begin(), body_tuples->end());
return result;
}
default:
return std::nullopt;
}
}
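// Returns true if elements i1 and i2 of every tuple that can reach
// `tuple_shaped_instruction` provably compute the same value.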
bool ArCrsCombiner::TupleElementsComputeSameValue(
HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,
absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
absl::flat_hash_set<HloInstruction*> visited;
auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);
if (!tuples) {
return false;
}
for (auto tuple : *tuples) {
CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
if (!InstructionsComputeSameValue(tuple->mutable_operand(i1),
tuple->mutable_operand(i2),
visited_pairs)) {
return false;
}
}
return true;
}
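// Test-only helper: compares two instructions from the same module using a
// throwaway combiner instance.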
bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1,
HloInstruction* i2) {
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto module = i1->GetModule();
CHECK_EQ(module, i2->GetModule());
combiner.call_graph_ = CallGraph::Build(module);
absl::flat_hash_map<int64_t, int64_t> visited_pairs;
return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);
}
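// Conservatively determines whether i1 and i2 compute the same value.
// `visited_pairs` records pairs already under comparison so that recursion
// through loop back-edges terminates.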
bool ArCrsCombiner::InstructionsComputeSameValue(
HloInstruction* i1, HloInstruction* i2,
absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
if (i1 == i2) {
return true;
}
auto uid1 = i1->unique_id();
auto uid2 = i2->unique_id();
auto min_uid = std::min(uid1, uid2);
auto max_uid = std::max(uid1, uid2);
auto it = visited_pairs->find(min_uid);
if (it != visited_pairs->end() && max_uid == it->second) {
return true;
}
auto opcode1 = i1->opcode();
auto operands1 = i1->operands();
if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {
return false;
}
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
auto eq_operands = [](const HloInstruction*, const HloInstruction*) {
return true;
};
if (i1->IsCrossModuleAllReduce()) {
    return i1->Identical(*i2, eq_operands, eq_computations,
                         /*layout_sensitive=*/false);
}
visited_pairs->emplace(min_uid, max_uid);
for (int i = 0; i < operands1.size(); ++i) {
auto operand1 = operands1[i];
auto operand2 = i2->operands()[i];
if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {
return false;
}
}
if (opcode1 == HloOpcode::kParameter) {
return false;
}
if (opcode1 == HloOpcode::kGetTupleElement) {
return i1->tuple_index() == i2->tuple_index() ||
TupleElementsComputeSameValue(operands1[0], i1->tuple_index(),
i2->tuple_index(), visited_pairs);
}
auto eq_instructions = [](const HloInstruction* i1,
const HloInstruction* i2) -> bool { return true; };
  return i1->Identical(*i2, eq_instructions, eq_computations,
                       /*layout_sensitive=*/false);
}
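// Groups matched AR/CRS pairs by the AR's channel id. If a CRS is already
// reserved by another channel id, only the pairing with the larger AR-to-CRS
// distance survives.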
void ArCrsCombiner::GroupAllReducesById(HloModule* module) {
absl::flat_hash_set<int64_t> discarded_ar_ids;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
auto maybe_pair = MatchesArCrsPattern(instruction);
if (maybe_pair) {
auto pair = *maybe_pair;
int64_t ar_id = *(instruction->channel_id());
if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {
continue;
}
auto it = crs_reserved_map_.find(pair.crs);
if (it != crs_reserved_map_.end()) {
auto prev_ar_id = it->second;
CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());
CHECK_NE(prev_ar_id, ar_id);
auto prev_pair = all_reduce_map_[prev_ar_id].back();
int64_t prev_distance = prev_pair.distance;
if (prev_distance < pair.distance) {
VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString()
<< " with ArCrsPair: " << pair.ToString();
all_reduce_map_.erase(prev_ar_id);
discarded_ar_ids.insert(prev_ar_id);
all_reduce_map_[ar_id].push_back(pair);
crs_reserved_map_[pair.crs] = ar_id;
} else {
discarded_ar_ids.insert(ar_id);
}
} else {
if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {
int64_t prev_distance = all_reduce_map_[ar_id].back().distance;
CHECK_EQ(prev_distance, pair.distance)
<< "All ARs with the same AR ID must have the same distance "
"from the corresponding CRSs. Found: "
<< prev_distance << " and " << pair.distance;
}
all_reduce_map_[ar_id].push_back(pair);
crs_reserved_map_[pair.crs] = ar_id;
}
}
}
}
}
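// MPMD mode: for each channel id, verifies that the per-partition paths from
// the ARs to their CRSs compute the same value; groups that cannot be proven
// equal are dropped from the rewrite map.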
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {
for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
auto copy_it = it++;
auto channel_id = copy_it->first;
VLOG(2)
<< "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
<< channel_id << "\n";
auto pairs_vec = copy_it->second;
TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);
auto instr_0 = pairs_vec[0].ar;
for (int i = 1; i < pairs_vec.size(); ++i) {
auto instr_i = pairs_vec[i].ar;
auto next_0 = instr_0->users()[0];
auto next_i = instr_i->users()[0];
absl::flat_hash_map<int64_t, int64_t> visited_pairs;
while (true) {
if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {
all_reduce_map_.erase(copy_it);
VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
"channel id: "
<< channel_id << "\n";
break;
}
if (next_0->IsCrossReplicaAllReduce()) {
break;
}
next_0 = next_0->users()[0];
next_i = next_i->users()[0];
}
}
}
return absl::OkStatus();
}
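// SPMD mode: drops a group from the rewrite map unless replication analysis
// proves that every instruction between the AR and the CRS is replicated
// across partitions.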
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(
HloModule* module) {
TF_ASSIGN_OR_RETURN(
auto replication_analysis,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/true));
for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
auto copy_it = it++;
auto channel_id = copy_it->first;
VLOG(2)
<< "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
<< channel_id << "\n";
auto pairs_vec = copy_it->second;
TF_RET_CHECK(pairs_vec.size() == 1);
auto instr = pairs_vec[0].ar;
auto next = instr->users()[0];
while (true) {
TF_RET_CHECK(next->shape().IsArray());
if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {
all_reduce_map_.erase(copy_it);
VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
"channel id: "
<< channel_id << "\n";
break;
}
if (next->IsCrossReplicaAllReduce()) {
break;
}
next = next->users()[0];
}
}
return absl::OkStatus();
}
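// Applies the rewrite to every surviving group: the AR is deleted and its
// channel id is moved onto the CRS, which then reduces across both replicas
// and partitions. Each other add/subtract summand on the path is either
// un-reduced (when it is itself a single-user cross-module AR) or divided by
// the partition count to compensate for the extra reduction.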
absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() {
if (all_reduce_map_.empty()) {
return false;
}
for (const auto& it : all_reduce_map_) {
auto pairs_vec = it.second;
for (auto pair : pairs_vec) {
auto all_reduce = pair.ar;
auto parent_computation = all_reduce->parent();
auto channel_id = all_reduce->channel_id();
auto prev = all_reduce->mutable_operand(0);
auto next = all_reduce->users()[0];
TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));
TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));
while (!next->IsCrossReplicaAllReduce()) {
switch (next->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
case HloOpcode::kConvert:
case HloOpcode::kMultiply:
break;
case HloOpcode::kAdd:
case HloOpcode::kSubtract: {
auto other_operand = (next->operands()[0] == prev)
? next->operands()[1]
: next->operands()[0];
if (other_operand->IsCrossModuleAllReduce() &&
other_operand->user_count() == 1) {
TF_CHECK_OK(other_operand->ReplaceAllUsesWith(
other_operand->mutable_operand(0)));
} else {
auto shape = other_operand->shape();
Literal lit(shape);
lit.PopulateWithValue<float>(num_spatial_partitions_);
auto divisor = parent_computation->AddInstruction(
HloInstruction::CreateConstant(lit.Clone()));
auto division = parent_computation->AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kDivide,
other_operand, divisor));
TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));
}
break;
}
default:
LOG(FATAL) << "Unexpected instruction: " << next->ToShortString();
}
prev = next;
next = next->users()[0];
}
next->set_channel_id(channel_id);
}
}
return true;
}
absl::StatusOr<bool> ArCrsCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
call_graph_ = CallGraph::Build(module);
GroupAllReducesById(module);
if (spmd_partition_) {
TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));
} else {
TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());
}
TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph());
if (module->config().replica_count() > 1 && spmd_partition_) {
TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce(
module, num_spatial_partitions_));
changed |= replaced;
}
return changed;
}
} | #include "xla/service/ar_crs_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ArCrsCombinerTest : public HloTestBase {};
TEST_F(ArCrsCombinerTest, SameValueTestBasecase) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}})
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(
i1, module->entry_computation()->parameter_instruction(0)));
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
ROOT %tuple = (f32[], f32[]) tuple(%x, %x)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %tuple = (f32[], f32[]) tuple(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNumOperands) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple1 = (f32[2,2]) tuple(%constant.f32)
%tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile1) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile2) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile3) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0]->operands()[0];
auto i2 = body_tuple->operands()[1]->operands()[0];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
ROOT %t = pred[] constant(true)
}
%body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%gte.1, %constant.f32)
%add.2 = f32[2,2] add(%gte.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
%body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2)
ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition,
body=%body_inner
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition,
body=%body_outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto inner_while = root_while->while_body()->root_instruction();
auto i1 = inner_while->while_body()->root_instruction()->operands()[0];
auto i2 = inner_while->while_body()->root_instruction()->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
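// Asserts that two replica-group lists are element-wise identical.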
void CompareReplicaGroups(absl::Span<const ReplicaGroup> groups_before,
absl::Span<const ReplicaGroup> groups_after) {
ASSERT_EQ(groups_before.size(), groups_after.size());
for (int i = 0; i < groups_before.size(); ++i) {
auto group_before = groups_before[i];
std::vector<int64_t> ids_before(group_before.replica_ids().begin(),
group_before.replica_ids().end());
auto group_after = groups_after[i];
std::vector<int64_t> ids_after(group_after.replica_ids().begin(),
group_after.replica_ids().end());
EXPECT_EQ(ids_before, ids_after);
}
}
TEST_F(ArCrsCombinerTest, RewriteArConvertCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%convert.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter())),
op::AllReduce(op::Convert(op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) {
const char* module_str = R"(
HloModule foobar
%sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] {
%a = f32[2,1] parameter(0)
%b = f32[2,1] parameter(1)
ROOT %add = f32[2,1] add(%a, %b)
}
%sum.2 (x: f32[2], y: f32[2]) -> f32[2] {
%x = f32[2] parameter(0)
%y = f32[2] parameter(1)
ROOT %add = f32[2] add(%x, %y)
}
ENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) {
%p = f32[2,1] parameter(0)
%all-reduce.ar.1 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=0}
%bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1)
%all-reduce.1 = f32[2]
all-reduce(%bitcast.1),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=1}
%bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2)
%all-reduce.2 = f32[2]
all-reduce(%bitcast.2),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=1}
ROOT %tuple = (f32[2], f32[2])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())),
op::AllReduce(op::Bitcast(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=0}
%multiply.1 = f32[]
multiply(%all-reduce.ar.1, %constant.f32),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%multiply.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=1}
%multiply.2 = f32[]
multiply(%all-reduce.ar.2, %constant.f32),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%multiply.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())),
op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.f32
%multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32)
%all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert())),
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0}
%add.1 = f32[] add(%constant.f32, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()), op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%constant.f32.2 = f32[] constant(3)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32.1, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32.2, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%add.1 = f32[] add(%p, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) {
const char* module_str = R"(
HloModule foobar
%sum.1 (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%all-reduce.ar.1),
replica_groups={{0,1}},
to_apply=%sum.1,
sharding={maximal device=0}
%multiply.1 = f32[]
multiply(%all-reduce.1, %constant.f32),
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%all-reduce.ar.2),
replica_groups={{0,1}},
to_apply=%sum.1,
sharding={maximal device=1}
%multiply.2 = f32[]
multiply(%all-reduce.2, %constant.f32),
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Parameter()),
op::AllReduce(op::Parameter())));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleAdds) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add.11 = f32[]
add(%constant.1, %all-reduce.ar.1),
sharding={maximal device=0}
%add.12 = f32[]
add(%constant.2, %add.11),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add.21 = f32[]
add(%constant.1, %all-reduce.ar.2),
sharding={maximal device=0}
%add.22 = f32[]
add(%constant.2, %add.21),
sharding={maximal device=0}
%all-reduce.2 = f32[]
all-reduce(%add.22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter()))),
op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter())))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum
%add.11 = f32[] add(%constant.1, %all-reduce.ar.1)
%add.12 = f32[] add(%constant.2, %add.11)
%all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter())))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=0}
%sub.1 = f32[]
subtract(%constant.f32, %all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%sub.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=1}
%sub.2 = f32[]
subtract(%constant.f32, %all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%sub.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),
op::Parameter())),
op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.f32
%sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1)
%all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Subtract(
op::Divide(op::Constant(), op::Constant()), op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add11 = f32[]
add(%ar11, %const1),
sharding={maximal device=0}
%ar12 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=0}
%add12 = f32[]
add(%add11, %ar12),
sharding={maximal device=0}
%crs1 = f32[]
all-reduce(%add12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%ar21 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=1}
%add21 = f32[]
add(%ar21, %const1),
sharding={maximal device=1}
%ar22 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=1}
%add22 = f32[]
add(%add21, %ar22),
sharding={maximal device=1}
%crs2 = f32[]
all-reduce(%add22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%crs1, %crs2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())),
op::Parameter())),
op::AllReduce(op::Add(
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1,
to_apply=%sum
%add11 = f32[] add(%ar11, %const1)
%ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2,
to_apply=%sum
%add12 = f32[] add(%add11, %ar12)
%crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}},
to_apply=%sum
ROOT %tuple = (f32[]) tuple(%crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%ar12 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=0}
%add11 = f32[]
add(%ar12, %const1),
sharding={maximal device=0}
%add12 = f32[]
add(%ar11, %add11),
sharding={maximal device=0}
%crs1 = f32[]
all-reduce(%add12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%ar21 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=1}
%ar22 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=1}
%add21 = f32[]
add(%ar22, %const1),
sharding={maximal device=1}
%add22 = f32[]
add(%ar21, %add21),
sharding={maximal device=1}
%crs2 = f32[]
all-reduce(%add22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%crs1, %crs2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())))),
op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant()))))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum
%ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum
%add11 = f32[] add(%ar12, %const1)
%add12 = f32[] add(%ar11, %add11)
%crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum
ROOT %tuple = (f32[]) tuple(%crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant()))))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%convert.2),
replica_groups={{0}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 1));
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%all-reduce.1 = f32[] all-reduce(%convert.1),
replica_groups={{0}}, to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 1));
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, SameValueTestConditional) {
const char* module_str = R"(
HloModule foobar
branch_true {
pt = (f32[2,4], f32[2,4]) parameter(0)
gte.0 = f32[2,4] get-tuple-element(pt), index=0
gte.1 = f32[2,4] get-tuple-element(pt), index=1
ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0)
}
branch_false {
pf = (f32[2,4], f32[2,4]) parameter(0)
gte.0 = f32[2,4] get-tuple-element(pf), index=0
gte.1 = f32[2,4] get-tuple-element(pf), index=1
add = f32[2,4] add(gte.1, gte.1)
ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add)
}
ENTRY Parameters1.v4 {
constant = pred[] constant(true)
p = f32[2,4] parameter(0)
tuple = (f32[2,4], f32[2,4]) tuple(p, p)
ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto cond = module->entry_computation()->root_instruction();
auto branch_true = cond->branch_computation(0)->root_instruction();
auto t0 = branch_true->mutable_operand(0);
auto t1 = branch_true->mutable_operand(1);
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1));
auto branch_false = cond->branch_computation(1)->root_instruction();
auto f0 = branch_false->mutable_operand(0);
auto f1 = branch_false->mutable_operand(1);
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1));
}
TEST_F(ArCrsCombinerTest, AllReduceWithReplicas) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
%all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=1}
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
%all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=1}
ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.2, %all-reduce.3),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(2, /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}},
to_apply=%sum.f32
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] {
%p = f32[2,4] parameter(0), sharding={replicated}
ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32,
replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 32));
  ArCrsCombiner combiner(2, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()),
op::Broadcast(op::Constant())));
auto ar = root->operand(0);
auto divisor = root->operand(1)->operand(0);
EXPECT_TRUE(ar->channel_id());
EXPECT_TRUE(divisor->literal().IsAllFloat(2));
}
TEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1,
replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true,
to_apply=%sum.f32
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2,
                                   /*num_partitions=*/4));
  ArCrsCombiner combiner(4, /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f1c72f99-0fb9-453a-a24b-21b943ceb2cf | cpp | tensorflow/tensorflow | dynamic_dimension_simplifier | third_party/xla/xla/service/dynamic_dimension_simplifier.cc | third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc | #include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
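// Concat(Concat(A, B), C) => Concat(A, B, C): flattens nested concatenates
// that share the same concatenation dimension.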
absl::StatusOr<bool> ConcatForwarding(HloInstruction* concat) {
if (concat->opcode() != HloOpcode::kConcatenate) {
return false;
}
bool changed = false;
auto parent = concat->parent();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : concat->operands()) {
if (operand->opcode() != HloOpcode::kConcatenate ||
operand->concatenate_dimension() != concat->concatenate_dimension()) {
new_operands.push_back(operand);
} else {
changed = true;
for (HloInstruction* operand_operand : operand->operands()) {
new_operands.push_back(operand_operand);
}
}
}
if (changed) {
auto new_concat = parent->AddInstruction(HloInstruction::CreateConcatenate(
concat->shape(), new_operands, concat->concatenate_dimension()));
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(concat, new_concat));
}
return changed;
}
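// Slice(Concat(A1, ..., An), [start:limit]) => Ak: forwards a rank-1,
// stride-1 slice that covers exactly one operand of the concatenate.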
absl::StatusOr<bool> SliceConcatForwarding(HloInstruction* slice) {
if (slice->opcode() != HloOpcode::kSlice) {
return false;
}
auto concat = slice->mutable_operand(0);
if (concat->opcode() != HloOpcode::kConcatenate) {
return false;
}
if (slice->shape().rank() != 1) {
return false;
}
int64_t concat_dim = concat->concatenate_dimension();
std::vector<HloInstruction*> new_operands;
int64_t size_so_far = 0;
int64_t slice_size = slice->shape().dimensions(concat_dim);
if (slice_size != slice->slice_limits(0) - slice->slice_starts(0)) {
return false;
}
if (slice->slice_strides(0) != 1) {
return false;
}
for (HloInstruction* operand : concat->operands()) {
if (size_so_far == slice->slice_starts(0) &&
operand->shape().dimensions(0) == slice_size) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(operand));
return true;
}
size_so_far += operand->shape().dimensions(concat_dim);
}
return false;
}
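// Reshape(Broadcast(A, [] -> [1]), [1] -> []) => A: a scalar broadcast to a
// one-element vector followed by a reshape back to a scalar is a no-op.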
absl::StatusOr<bool> ReshapeBroadcastForwarding(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto broadcast = reshape->mutable_operand(0);
if (broadcast->opcode() != HloOpcode::kBroadcast) {
return false;
}
if (reshape->shape().rank() != 0) {
return false;
}
if (broadcast->shape().rank() != 1) {
return false;
}
if (broadcast->mutable_operand(0)->shape().rank() != 0) {
return false;
}
TF_RETURN_IF_ERROR(
reshape->ReplaceAllUsesWith(broadcast->mutable_operand(0)));
return true;
}
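// Reshape(Reshape(A)) => A when the outer reshape restores the original
// shape of A.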
absl::StatusOr<bool> ReshapeReshapeForwarding(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto reshape_2 = reshape->mutable_operand(0);
if (reshape_2->opcode() != HloOpcode::kReshape) {
return false;
}
if (!Shape::Equal()(reshape->shape(), reshape_2->operand(0)->shape())) {
return false;
}
TF_RETURN_IF_ERROR(
reshape->ReplaceAllUsesWith(reshape_2->mutable_operand(0)));
return true;
}
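// Convert(A, T -> T) => A: removes converts whose output shape equals the
// operand shape.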
absl::StatusOr<bool> IdentityConvertRemoving(HloInstruction* convert) {
if (convert->opcode() != HloOpcode::kConvert) {
return false;
}
auto operand = convert->mutable_operand(0);
if (Shape::Equal()(convert->shape(), operand->shape())) {
TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(operand));
return true;
}
return false;
}
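// Reshape(A, S -> S) => A: removes reshapes whose output shape equals the
// operand shape.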
absl::StatusOr<bool> IdentityReshapeRemoving(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto operand = reshape->mutable_operand(0);
if (Shape::Equal()(reshape->shape(), operand->shape())) {
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(operand));
return true;
}
return false;
}
}
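// Applies each simplification above as its own module-wide sweep, in a fixed
// order, over all non-fusion computations.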
absl::StatusOr<bool> DynamicDimensionSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "DynamicDimensionSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ConcatForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, SliceConcatForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeBroadcastForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeReshapeForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, IdentityConvertRemoving(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, IdentityReshapeRemoving(inst));
changed |= local_changed;
}
}
XLA_VLOG_LINES(
2, "DynamicDimensionSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/dynamic_dimension_simplifier.h"
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = match;
class DynamicDimensionSimplifierTest : public HloTestBase {};
TEST_F(DynamicDimensionSimplifierTest, ForwardConcat) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat1 = s32[2] concatenate(p0, p1), dimensions={0}
ROOT concat2 = s32[3] concatenate(concat1, p2), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Concatenate(m::Parameter(0), m::Parameter(1),
m::Parameter(2))));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatMultipleDims) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
p1 = s32[1, 1] parameter(1)
p2 = s32[2, 1] parameter(2)
concat1 = s32[2, 1] concatenate(p0, p1), dimensions={0}
ROOT concat2 = s32[2, 2] concatenate(concat1, p2), dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, ForwardConcatSlice) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[1] slice(concat), slice={[1:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(1)));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceSizeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[2] slice(concat), slice={[1:3]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceStrided) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[1] slice(concat), slice={[1:2:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, BroadcastReshapeForwarding) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[] parameter(0)
broadcast = s32[1] broadcast(p0), dimensions={}
ROOT reshape = s32[] reshape(broadcast)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest, ReshapeReshapeForwarding) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest,
DoNotReshapeReshapeForwardingShapeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, IdConvertRemoving) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
ROOT reshape2 = s32[1] convert(p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b2d66996-ec37-441c-9dba-74eedda21742 | cpp | tensorflow/tensorflow | all_reduce_simplifier | third_party/xla/xla/service/all_reduce_simplifier.cc | third_party/xla/xla/service/all_reduce_simplifier_test.cc | #include "xla/service/all_reduce_simplifier.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<bool> AllReduceSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
auto replication,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/false));
std::vector<std::pair<HloInstruction*, int64_t>> all_reduces_to_replace;
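  // Returns the participant count shared by every replica group of the given
  // all-reduce, or -1 if the groups are empty or their sizes differ.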
auto get_participant_counts_for_replica_group =
[](const HloInstruction* all_reduce) -> absl::StatusOr<int64_t> {
const HloModuleConfig& config = all_reduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(all_reduce)
->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices,
all_reduce->replica_groups(), group_mode));
if (participant_counts.empty()) {
return -1;
}
if (!absl::c_all_of(participant_counts, [&](int64_t participant_count) {
return participant_count == participant_counts[0];
})) {
return -1;
}
return participant_counts[0];
};
bool changed = false;
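  // An all-gather or reduce-scatter whose output shape is compatible with its
  // operand shape is a degenerate collective; forward the operand.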
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if ((inst->opcode() == HloOpcode::kAllGather ||
inst->opcode() == HloOpcode::kReduceScatter) &&
ShapeUtil::Compatible(inst->shape(), inst->operand(0)->shape())) {
changed = true;
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(inst, inst->mutable_operand(0)));
}
}
}
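  // Collect array-shaped all-reduces whose operand is replicated across all
  // participants, or whose replica groups each contain a single participant.
  // Cross-module all-reduces only qualify with single-participant groups
  // under SPMD partitioning.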
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (!inst->shape().IsArray()) {
continue;
}
if (!inst->IsCrossReplicaAllReduce() && !inst->IsCrossModuleAllReduce()) {
continue;
}
TF_ASSIGN_OR_RETURN(int64_t group_size,
get_participant_counts_for_replica_group(inst));
if (group_size == -1 ||
(!inst->IsCrossReplicaAllReduce() && group_size != 1) ||
(!inst->IsCrossReplicaAllReduce() &&
!module->config().use_spmd_partitioning())) {
continue;
}
if (replication->HloInstructionIsReplicatedAt(inst->operand(0), {}) ||
group_size == 1) {
all_reduces_to_replace.push_back({inst, group_size});
}
}
}
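  // Rewrite the collected all-reduces: a single-participant all-reduce
  // forwards its operand; a sum of a replicated value becomes a multiply by
  // the group size; min/max/and/or of a replicated value is the value itself.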
for (auto all_reduce_and_group_size : all_reduces_to_replace) {
auto all_reduce = all_reduce_and_group_size.first;
const int64_t replica_group_size = all_reduce_and_group_size.second;
if (replica_group_size == 1) {
TF_RETURN_IF_ERROR(all_reduce->parent()->ReplaceInstruction(
all_reduce, all_reduce->mutable_operand(0)));
changed = true;
continue;
}
if (all_reduce->to_apply()->instruction_count() != 3 ||
all_reduce->to_apply()->num_parameters() != 2) {
continue;
}
HloInstruction* replacement;
switch (all_reduce->to_apply()->root_instruction()->opcode()) {
case HloOpcode::kAdd: {
auto multiplier =
all_reduce->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(replica_group_size)));
if (all_reduce->shape().element_type() != S32) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(
multiplier->shape(), all_reduce->shape().element_type()),
multiplier));
}
if (all_reduce->shape().rank() > 0) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateBroadcast(all_reduce->shape(), multiplier,
{}));
}
replacement =
all_reduce->parent()->AddInstruction(HloInstruction::CreateBinary(
all_reduce->shape(), HloOpcode::kMultiply,
all_reduce->mutable_operand(0), multiplier));
break;
}
case HloOpcode::kMinimum:
case HloOpcode::kMaximum:
case HloOpcode::kOr:
case HloOpcode::kAnd:
replacement = all_reduce->mutable_operand(0);
break;
default:
continue;
}
VLOG(2) << "Replacing " << all_reduce->ToString() << " with "
<< replacement->ToString();
TF_RETURN_IF_ERROR(all_reduce->ReplaceAllUsesWith(replacement));
changed = true;
}
return changed;
}
} | #include "xla/service/all_reduce_simplifier.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
using AllReduceSimplifierTest = HloTestBase;
TEST_F(AllReduceSimplifierTest, ReplicatedParameters) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
sum.1 {
a.3 = f32[] parameter(0)
b.3 = f32[] parameter(1)
ROOT add.1 = f32[] add(a.3, b.3)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
p2 = f32[] parameter(2), parameter_replication={true}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min
all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduce.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kModuleStr, 8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(8)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)),
m::MultiplyAnyOrder(m::Parameter(2),
m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, AllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kModuleStr, 8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::MultiplyAnyOrder(
m::AllReduce(m::Parameter(0)),
m::Broadcast(m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, SubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16]) tuple(all-reduce, all-reduce.1, all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kModuleStr, 8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(4)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)))));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kModuleStr, 8));
AllReduceSimplifier simplifier(8);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                   /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, NonCrossReplicaAllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0,2},{1,3},{4,6},{5,7}},
to_apply=sum
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce),
channel_id=2,
use_global_device_ids=true,
replica_groups={{0,4},{1,5},{2,6},{3,7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                   /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
TEST_F(AllReduceSimplifierTest, MPMDNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
replica_groups={{0},{1}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/2,
                                   /*num_partitions=*/1));
module->mutable_config().set_use_spmd_partitioning(false);
AllReduceSimplifier simplifier(2);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
08422a02-e75f-4e3b-88eb-cda17385701d | cpp | tensorflow/tensorflow | while_loop_fusible_sinking | third_party/xla/xla/service/while_loop_fusible_sinking.cc | third_party/xla/xla/service/while_loop_fusible_sinking_test.cc | #include "xla/service/while_loop_fusible_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
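// Instructions that materialize a large value from little or no input data:
// broadcasts, scalar constants, and iotas.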
bool IsPurelyExpanding(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kBroadcast ||
(instr->opcode() == HloOpcode::kConstant &&
instr->shape().rank() == 0) ||
instr->opcode() == HloOpcode::kIota;
}
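// Cheap instructions that are safe to duplicate into the loop body:
// elementwise ops (except rng), reshapes, and transposes.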
bool IsFusionCandidate(const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kRng &&
(instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose);
}
}
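// Returns true if the DAG rooted at while_operand consists only of fusible
// instructions that are purely expanding or cheap fusion candidates, giving
// up once roughly 100 distinct instructions have been visited.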
bool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) {
absl::InlinedVector<HloInstruction*, 8> worklist;
absl::flat_hash_set<int> visited;
worklist.push_back(while_operand);
while (!worklist.empty()) {
HloInstruction* to_process = worklist.back();
worklist.pop_back();
if (!to_process->IsFusible()) {
return false;
}
if (!visited.insert(to_process->unique_id()).second) {
if (visited.size() > 100) {
return false;
}
continue;
}
if (IsPurelyExpanding(to_process)) {
continue;
}
if (IsFusionCandidate(to_process)) {
for (auto* op : to_process->operands()) {
worklist.push_back(op);
}
continue;
}
return false;
}
return true;
}
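// Wraps while_operand in a loop fusion: non-expanding producers are fused in
// first, then the purely expanding ones, so only the small inputs of the
// expanding instructions remain as fusion operands.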
HloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion(
HloInstruction* while_operand) {
HloInstruction* fusion =
while_operand->AddInstruction(while_operand->CreateFusion(
while_operand->shape(), HloInstruction::FusionKind::kLoop,
while_operand));
bool did_fuse = IsFusionCandidate(while_operand);
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
continue;
}
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
did_fuse = true;
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
}
return fusion;
}
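// Sinks loop-invariant fusible values into the while body: the fusion's small
// operands are appended to the loop carry, the loop shapes are widened
// accordingly, and the fused computation is re-materialized (and defused)
// inside the body.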
absl::StatusOr<bool> WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) {
return false;
}
HloInstruction* init_value = while_instr->mutable_operand(0);
if (init_value->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
std::vector<int64_t> tuple_indices;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) {
CHECK_EQ(while_instr->user_count(), 0);
VLOG(3) << "Each element in the operand tuple of the while instruction '"
<< while_instr->name()
<< "' was an invariant value, whose usage has been replaced "
" directly by the value.";
break;
}
HloInstruction* invariant_value = init_value->mutable_operand(index);
if (absl::c_any_of(invariant_body_gte->users(),
[](const HloInstruction* use) {
switch (use->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kSlice:
return true;
default:
return false;
}
})) {
continue;
}
if (init_value->IsRoot() || init_value->user_count() > 1) {
init_value = init_value->AddInstruction(init_value->Clone());
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value));
}
if (!IsSinkableFusion(invariant_value)) {
continue;
}
HloInstruction* fusion = CreateSinkableFusion(invariant_value);
changed = true;
if (fusion->operand_count() > 0 &&
(while_instr->IsRoot() ||
absl::c_any_of(while_instr->users(), [&](HloInstruction* use) {
return use->opcode() != HloOpcode::kGetTupleElement;
}))) {
auto uses = while_instr->users();
std::vector<HloInstruction*> gtes(init_value->operand_count());
for (int64_t i = 0; i < gtes.size(); ++i) {
gtes[i] = while_instr->AddInstruction(
HloInstruction::CreateGetTupleElement(while_instr, i));
}
HloInstruction* tuple =
while_instr->AddInstruction(HloInstruction::CreateTuple(gtes));
if (while_instr->IsRoot()) {
while_instr->parent()->set_root_instruction(tuple);
}
if (!uses.empty()) {
TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple));
}
}
absl::InlinedVector<HloInstruction*, 2> invariant_output_uses;
for (auto use : while_instr->users()) {
if (use->opcode() == HloOpcode::kGetTupleElement &&
use->tuple_index() == index) {
invariant_output_uses.push_back(use);
}
}
for (auto use : invariant_output_uses) {
TF_RETURN_IF_ERROR(
while_instr->parent()->ReplaceInstruction(use, invariant_value));
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* parameter = while_body->parameter_instruction(0);
tuple_indices.resize(fusion->operand_count());
int64_t next_index = init_value->operand_count();
new_operands.resize(fusion->operand_count());
for (int64_t i = 0; i < fusion->operand_count(); ++i) {
init_value->AppendOperand(fusion->mutable_operand(i));
parameter->mutable_shape()->mutable_tuple_shapes()->push_back(
fusion->mutable_operand(i)->shape());
new_operands[i] = root->AddInstruction(
HloInstruction::CreateGetTupleElement(parameter, next_index++));
root->AppendOperand(new_operands[i]);
}
*(init_value->mutable_shape()) = parameter->shape();
*(while_instr->mutable_shape()) = parameter->shape();
*(while_cond->parameter_instruction(0)->mutable_shape()) =
parameter->shape();
*(root->mutable_shape()) = parameter->shape();
auto cloned_fusion = while_body->AddInstruction(
fusion->CloneWithNewOperands(fusion->shape(), new_operands));
TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion));
TF_RETURN_IF_ERROR(cloned_fusion->Defuse());
}
return changed;
}
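// Counts how many while instructions share each body/condition computation
// first; sinking is skipped for shared computations since it mutates them.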
absl::StatusOr<bool> WhileLoopFusibleSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
call_counts_.clear();
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
call_counts_[while_instr->while_body()]++;
call_counts_[while_instr->while_condition()]++;
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(bool result,
TrySinkingFusiblesIntoWhileLoop(while_instr));
changed |= result;
}
return changed;
}
} | #include "xla/service/while_loop_fusible_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopFusibleSinkingTest = HloTestBase;
TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] parameter(0)
const_1 = f32[2] iota(), iota_dimension=0
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Iota()), _));
}
TEST_F(WhileLoopFusibleSinkingTest, SinkMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),
op::Broadcast())),
_, _));
}
TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
z = s32[] constant(0)
j = s32[] constant(3)
ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}
r = f32[7] reshape(ds)
b = f32[5,7] broadcast(r), dimensions={1}
a = add(b, p_body.0)
add.0 = add(a, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
555aed98-8f55-421e-badb-302e32069668 | cpp | tensorflow/tensorflow | reduce_scatter_decomposer | third_party/xla/xla/service/reduce_scatter_decomposer.cc | third_party/xla/xla/service/reduce_scatter_decomposer_test.cc | #include "xla/service/reduce_scatter_decomposer.h"
#include <sys/types.h>
#include <limits>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> ReduceScatterDecomposer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
bool changed = false;
int64_t next_channel_id = hlo_query::NextChannelId(*module);
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
auto *rs = DynCast<HloReduceScatterInstruction>(instruction);
if (!rs || !rs->shape().IsArray()) {
continue;
}
std::optional<int64_t> channel_id;
if (rs->channel_id()) {
channel_id = next_channel_id++;
}
if (should_decompose_ && !should_decompose_(rs)) {
continue;
}
VLOG(2) << "Decompose: " << rs->ToString();
HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds(
rs->to_apply()->Clone(), false);
HloInstruction *ar =
computation->AddInstruction(HloInstruction::CreateAllReduce(
rs->operand(0)->shape(), rs->operands(), apply_clone,
rs->device_list(), rs->constrain_layout(), channel_id,
rs->use_global_device_ids()));
apply_clone->SetCollectiveCallInstruction(ar);
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(rs->channel_id().has_value(),
rs->use_global_device_ids()));
TF_ASSIGN_OR_RETURN(
std::vector<HloInstruction *> start_indices,
CreateStartIndicesForCollectiveDecomposition(
group_mode, rs->replica_groups(), rs->shape(),
rs->scatter_dimension(), computation, update_layout_));
HloInstruction *ds =
computation->AddInstruction(HloInstruction::CreateDynamicSlice(
rs->shape(), ar, start_indices, rs->shape().dimensions()));
TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs));
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_scatter_decomposer.h"
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ReduceScatterDecomposerTest : public HloTestBase {
public:
enum class PassAction {
kNoChange,
kTrivialGroups,
kTableLookup,
};
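  // Runs the decomposer and, unless kNoChange is expected, checks that the
  // root is a dynamic-slice of an all-reduce whose offset along
  // shard_dimension is the participant index times shard_size; for
  // kTableLookup the index is first mapped through a constant table.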
void RunPass(
absl::string_view hlo_module, PassAction action,
CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica,
int64_t shard_size = 0, int64_t shard_dimension = 0,
int64_t replica_count = 2,
std::function<bool(const HloInstruction *)> should_decompose =
[](const HloInstruction *) { return true; }) {
const int64_t partition_count = 2;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count,
partition_count));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ReduceScatterDecomposer(nullptr,
should_decompose)
.Run(module.get()));
if (action == PassAction::kNoChange) {
ASSERT_FALSE(changed);
return;
}
ASSERT_TRUE(changed);
Literal multiplier = LiteralUtil::CreateR0<uint32_t>(shard_size);
::testing::Matcher<const ::xla::HloInstruction *> id_matcher = [&]() {
switch (mode) {
case CollectiveOpGroupMode::kCrossPartition:
return op::PartitionId();
case CollectiveOpGroupMode::kCrossReplica:
return op::ReplicaId();
case CollectiveOpGroupMode::kCrossReplicaAndPartition:
return op::ReplicaId();
case CollectiveOpGroupMode::kFlattenedID: {
return op::Add(
op::Multiply(op::ReplicaId(),
op::Constant(LiteralUtil::CreateR0<uint32_t>(
partition_count))),
op::PartitionId());
}
}
}();
auto root = module->entry_computation()->root_instruction();
const Shape &shape = root->shape();
::testing::Matcher<const ::xla::HloInstruction *> slice_index = id_matcher;
if (action == PassAction::kTableLookup) {
slice_index = op::Reshape(op::DynamicSlice(op::Constant(), id_matcher));
}
if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
slice_index = op::Add(
op::Multiply(
slice_index,
op::Constant(LiteralUtil::CreateR0<uint32_t>(partition_count))),
op::PartitionId());
}
auto zero_matcher = op::Constant(LiteralUtil::Zero(U32));
std::vector<::testing::Matcher<const ::xla::HloInstruction *>> ds_operands(
shape.rank() + 1, zero_matcher);
ds_operands[0] = op::AllReduce(op::Parameter(0));
ds_operands[shard_dimension + 1] =
op::Multiply(slice_index, op::Constant(std::move(multiplier)));
EXPECT_THAT(root, op::DynamicSlice(ds_operands));
}
};
TEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplicaAndPartition,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest,
TrivialCrossReplicaAndPartition_SingleReplica) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossPartition,
4, 1, 1);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kFlattenedID,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange);
}
TEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange,
CollectiveOpGroupMode::kCrossReplica,
0, 0,
2, [](const HloInstruction *) { return false; });
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d1d8932-f15f-4fd7-a669-a3307f9aec2f | cpp | tensorflow/tensorflow | while_loop_concat_code_motion | third_party/xla/xla/service/while_loop_concat_code_motion.cc | third_party/xla/xla/service/while_loop_concat_code_motion_test.cc | #include "xla/service/while_loop_concat_code_motion.h"
#include <map>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
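// A group of instructions that the pass will replace with one concatenated
// instruction. concat_dim is the dimension along which elements are joined;
// if inserted_concat_dim is true, concatenation adds a new dimension of size
// elements.size() instead of widening an existing one, and element offsets
// are simply 0, 1, 2, ...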
struct ConcatGroup {
ConcatGroup(std::vector<HloInstruction*> elements, int64_t concat_dim,
bool inserted_concat_dim)
: elements(std::move(elements)),
element_sizes(this->elements.size(), 1),
element_offsets(this->elements.size(), 0),
concat_dim(concat_dim),
inserted_concat_dim(inserted_concat_dim) {
if (inserted_concat_dim) {
absl::c_iota(element_offsets, 0);
} else {
for (int64_t i = 0; i < element_sizes.size(); ++i) {
element_sizes[i] = this->elements[i]->shape().dimensions(concat_dim);
if (i > 0) {
element_offsets[i] = element_offsets[i - 1] + element_sizes[i - 1];
}
}
}
}
Shape GetConcatShape() const {
if (inserted_concat_dim) {
std::vector<int64_t> dims;
const Shape& element_shape = elements.back()->shape();
dims.reserve(element_shape.rank() + 1);
for (int64_t i = 0; i < element_shape.rank(); ++i) {
if (i == concat_dim) {
dims.push_back(elements.size());
}
dims.push_back(element_shape.dimensions(i));
}
if (dims.size() == concat_dim) {
dims.push_back(elements.size());
}
return ShapeUtil::MakeShape(element_shape.element_type(), dims);
} else {
int64_t dim_size = 0;
for (int64_t size : element_sizes) {
dim_size += size;
}
Shape shape = elements.back()->shape();
shape.set_dimensions(concat_dim, dim_size);
return shape;
}
}
HloInstruction* CreateSlice(HloInstruction* full_data, int64_t element_index,
HloComputation* comp) const {
Shape shape = full_data->shape();
shape.set_dimensions(concat_dim, element_sizes[element_index]);
std::vector<int64_t> starts(shape.rank(), 0);
std::vector<int64_t> limits(shape.dimensions().begin(),
shape.dimensions().end());
starts[concat_dim] = element_offsets[element_index];
limits[concat_dim] += starts[concat_dim];
auto slice = comp->AddInstruction(
HloInstruction::CreateSlice(shape, full_data, starts, limits,
std::vector<int64_t>(shape.rank(), 1)));
if (!inserted_concat_dim) {
return slice;
}
std::vector<int64_t> element_shape;
element_shape.reserve(shape.rank() - 1);
for (int64_t i = 0; i < shape.rank(); ++i) {
if (i != concat_dim) {
element_shape.push_back(shape.dimensions(i));
}
}
return comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(shape.element_type(), element_shape), slice));
}
HloInstruction* CreateConcat(std::vector<HloInstruction*> input_elements,
HloComputation* comp) const {
if (inserted_concat_dim) {
for (int64_t i = 0; i < input_elements.size(); ++i) {
std::vector<int64_t> element_shape;
element_shape.reserve(input_elements[i]->shape().rank() + 1);
for (int64_t j = 0; j < input_elements[i]->shape().rank(); ++j) {
if (j == concat_dim) {
element_shape.push_back(1);
}
element_shape.push_back(input_elements[i]->shape().dimensions(j));
}
if (element_shape.size() == concat_dim) {
element_shape.push_back(1);
}
input_elements[i] = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(input_elements[i]->shape().element_type(),
element_shape),
input_elements[i]));
}
}
return comp->AddInstruction(HloInstruction::CreateConcatenate(
GetConcatShape(), input_elements, concat_dim));
}
std::vector<HloInstruction*> elements;
std::vector<int64_t> element_sizes;
std::vector<int64_t> element_offsets;
int64_t concat_dim;
bool inserted_concat_dim;
};
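// Registry of all ConcatGroups formed so far. Maps each element to its
// (group index, position) pair, deduplicates group creation, and records
// instructions on which grouping has been explicitly disallowed.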
class ConcatGroups {
public:
std::optional<std::pair<int64_t, int64_t>> GetGroupIndex(
const HloInstruction* hlo) const {
auto it = element_to_group_.find(hlo);
if (it == element_to_group_.end()) {
return std::nullopt;
}
return it->second;
}
const ConcatGroup& GetGroup(int64_t index) const { return groups_[index]; }
std::pair<bool, int64_t> MaybeCreateNewGroup(ConcatGroup group) {
int64_t group_id = -1;
absl::flat_hash_set<HloInstruction*> elements_dedup;
for (int64_t i = 0; i < group.elements.size(); ++i) {
if (!elements_dedup.insert(group.elements[i]).second) {
VLOG(2) << "Duplicates in group. Element: "
<< group.elements[i]->ToString();
}
if (concat_disallowed_.contains(group.elements[i])) {
VLOG(2) << "Failed creating group. Grouping disallowed on "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
auto existing = GetGroupIndex(group.elements[i]);
if (existing.has_value() &&
(i != existing->second ||
groups_[existing->first].concat_dim != group.concat_dim)) {
VLOG(2)
<< "Failed creating group. Different than existing group. Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
if (i == 0 && existing.has_value()) {
group_id = existing->first;
}
if (i > 0) {
if (existing.has_value() && existing->first != group_id) {
VLOG(2) << "Failed creating group. Different than existing group. "
"Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
if (!existing.has_value() && group_id >= 0) {
VLOG(2) << "Failed creating group. Different than existing group. "
"Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
}
}
if (group_id >= 0) {
VLOG(2) << "Group already exists at " << group_id << " for "
<< group.elements[0]->ToString();
return std::pair<bool, int64_t>(false, group_id);
}
int64_t index = groups_.size();
for (int64_t i = 0; i < group.elements.size(); ++i) {
element_to_group_[group.elements[i]] =
std::pair<int64_t, int64_t>(index, i);
}
VLOG(2) << "Created new group at " << index << " for "
<< group.elements[0]->ToString()
<< ", concat_dim: " << group.concat_dim
<< ", inserted: " << group.inserted_concat_dim;
groups_.push_back(std::move(group));
return std::pair<bool, int64_t>(true, index);
}
const std::vector<ConcatGroup>& Groups() const { return groups_; }
int64_t NextGroupIndex() const { return groups_.size(); }
void RemoveTailingGroups(int64_t start_index) {
while (groups_.size() > start_index) {
for (auto element : groups_.back().elements) {
element_to_group_.erase(element);
}
groups_.pop_back();
}
}
void DisallowGroupingOn(const HloInstruction* hlo) {
VLOG(2) << "Disallow grouping on " << hlo->ToString();
concat_disallowed_.insert(hlo);
}
private:
absl::flat_hash_map<const HloInstruction*, std::pair<int64_t, int64_t>>
element_to_group_;
std::vector<ConcatGroup> groups_;
absl::flat_hash_set<const HloInstruction*> concat_disallowed_;
};
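// Given an instruction whose output is concatenated along hlo_concat_dim,
// computes the matching concat dimension on operand operand_index and whether
// that dimension would be newly inserted there. Returns nullopt when the
// opcode is unsupported or the dimension cannot be traced through a reshape.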
std::optional<std::pair<int64_t, bool>> GetOperandConcatDim(
const HloInstruction* hlo, int64_t operand_index, int64_t hlo_concat_dim,
bool hlo_inserted_concat_dim,
const ConcatGroup* combined_operand_group = nullptr) {
if (hlo->IsElementwise() || hlo->opcode() == HloOpcode::kAllReduce) {
return std::pair<int64_t, bool>(hlo_concat_dim, hlo_inserted_concat_dim);
}
int64_t operand_concat_dim = -1;
bool operand_inserted_concat_dim = false;
const Shape& operand_shape =
combined_operand_group == nullptr
? hlo->operand(operand_index)->shape()
: combined_operand_group->elements.back()->shape();
if (hlo->opcode() == HloOpcode::kBroadcast) {
operand_concat_dim = 0;
operand_inserted_concat_dim = true;
int64_t min_dist_to_concat_dim = hlo->shape().rank();
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (hlo->dimensions(i) == hlo_concat_dim) {
operand_concat_dim = i;
operand_inserted_concat_dim = hlo_inserted_concat_dim;
break;
}
if (hlo->dimensions(i) < hlo_concat_dim &&
min_dist_to_concat_dim > hlo_concat_dim - hlo->dimensions(i)) {
operand_concat_dim = i + 1;
min_dist_to_concat_dim = hlo_concat_dim - hlo->dimensions(i);
}
if (hlo->dimensions(i) > hlo_concat_dim &&
min_dist_to_concat_dim > hlo->dimensions(i) - hlo_concat_dim) {
operand_concat_dim = i;
min_dist_to_concat_dim = hlo->dimensions(i) - hlo_concat_dim;
}
}
} else if (hlo->opcode() == HloOpcode::kReduce) {
if (operand_index != 0) {
return std::nullopt;
}
operand_concat_dim = hlo_concat_dim;
operand_inserted_concat_dim = hlo_inserted_concat_dim;
std::set<int64_t> sorted_reduce_dims;
for (int64_t dim : hlo->dimensions()) {
sorted_reduce_dims.insert(dim);
}
for (int64_t dim : sorted_reduce_dims) {
if ((hlo_inserted_concat_dim && dim < operand_concat_dim) ||
(!hlo_inserted_concat_dim && dim <= operand_concat_dim)) {
operand_concat_dim++;
}
}
} else if (hlo->opcode() == HloOpcode::kReshape) {
int64_t i = 0;
int64_t j = 0;
operand_inserted_concat_dim = false;
while (i < operand_shape.rank() || j <= hlo_concat_dim) {
if (i < operand_shape.rank() && j < hlo->shape().rank() &&
operand_shape.dimensions(i) == hlo->shape().dimensions(j)) {
if (j == hlo_concat_dim) {
operand_inserted_concat_dim =
hlo_inserted_concat_dim && operand_shape.dimensions(i) != 1;
operand_concat_dim = i;
break;
}
i++;
j++;
continue;
}
if (i < operand_shape.rank() && operand_shape.dimensions(i) == 1) {
if (j == hlo_concat_dim && hlo_inserted_concat_dim) {
operand_concat_dim = i;
break;
}
i++;
continue;
}
if (j == hlo_concat_dim) {
operand_concat_dim = i;
operand_inserted_concat_dim = true;
break;
}
if (j < hlo->shape().rank() && hlo->shape().dimensions(j) == 1) {
j++;
continue;
}
return std::nullopt;
}
} else {
return std::nullopt;
}
CHECK_GE(operand_concat_dim, 0);
return std::pair<int64_t, bool>(operand_concat_dim,
operand_inserted_concat_dim);
}
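// Rewrites hlo in place to produce the group's concatenated shape, fixing up
// broadcast dimension mappings and shifting reduce dimensions past a newly
// inserted concat dimension.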
void ModifyHloPropertiesForConcatShape(const ConcatGroup& group,
HloInstruction* hlo) {
*hlo->mutable_shape() = group.GetConcatShape();
if (hlo->opcode() == HloOpcode::kBroadcast) {
auto operand_dim = GetOperandConcatDim(
group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
CHECK(operand_dim.has_value());
int64_t operand_concat_dim = operand_dim->first;
bool operand_inserted_concat_dim = operand_dim->second;
if (operand_inserted_concat_dim) {
CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size() + 1)
<< hlo->ToString();
} else {
CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size());
}
std::vector<int64_t> dims;
const int64_t rank = hlo->operand(0)->shape().rank();
dims.reserve(rank);
for (int64_t i = 0; i < rank; ++i) {
if (i == operand_concat_dim && operand_inserted_concat_dim) {
dims.push_back(group.concat_dim);
} else {
if (i > operand_concat_dim && operand_inserted_concat_dim) {
dims.push_back(hlo->dimensions(i - 1));
} else {
dims.push_back(hlo->dimensions(i));
}
if (group.inserted_concat_dim && dims.back() >= group.concat_dim) {
dims.back()++;
}
}
}
*hlo->mutable_dimensions() = std::move(dims);
} else if (hlo->opcode() == HloOpcode::kReduce) {
auto operand_dim = GetOperandConcatDim(
group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
    CHECK(operand_dim.has_value());
    int64_t operand_concat_dim = operand_dim->first;
    bool operand_inserted_concat_dim = operand_dim->second;
if (operand_inserted_concat_dim) {
auto dims = hlo->mutable_dimensions();
for (int64_t i = 0; i < dims->size(); ++i) {
if ((*dims)[i] >= operand_concat_dim) {
(*dims)[i]++;
}
}
}
}
}
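// Starting from a concatenate in the loop body, walks producers in reverse
// topological order (via the priority queue below) and tries to form a
// ConcatGroup at every step, following parameter GTEs around the back edge
// through the root tuple. On any unsupported pattern, deletes the groups
// created for this concat and returns false.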
bool GroupHlosForConcat(
HloComputation* body, HloInstruction* concat,
absl::flat_hash_map<const HloInstruction*, int64_t> topological_order,
ConcatGroups* groups) {
const int64_t group_size = concat->operand_count();
absl::flat_hash_set<int64_t> used_groups;
auto root_tuple = body->root_instruction();
CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);
absl::flat_hash_map<HloInstruction*, int64_t> root_tuple_element_use_count;
for (auto operand : root_tuple->operands()) {
root_tuple_element_use_count.emplace(operand, 0).first->second++;
}
std::multimap<int64_t, ConcatGroup> pq;
const int64_t first_group_id_to_create = groups->NextGroupIndex();
auto fail_and_cleanup = [&] {
VLOG(1) << "Failed to get the subcomputation to optimize for "
<< concat->ToString() << ", clear groups starting at "
<< first_group_id_to_create;
groups->RemoveTailingGroups(first_group_id_to_create);
return false;
};
struct GroupUse {
int64_t group_id;
bool newly_created;
bool already_used_by_subcomp;
};
auto maybe_create_group = [&](ConcatGroup group) {
auto res = groups->MaybeCreateNewGroup(std::move(group));
GroupUse use{res.second, false, false};
if (res.second < 0) {
return use;
}
use.newly_created = res.first;
use.already_used_by_subcomp = !used_groups.insert(res.second).second;
return use;
};
std::vector<HloInstruction*> concat_operands(concat->operands().begin(),
concat->operands().end());
int64_t concat_operand_order = -topological_order[concat_operands[0]];
pq.emplace(concat_operand_order,
ConcatGroup(std::move(concat_operands),
concat->concatenate_dimension(), false));
while (!pq.empty()) {
auto group = std::move(pq.begin()->second);
pq.erase(pq.begin());
const auto& hlos = group.elements;
VLOG(2) << "GroupHlosForConcat dequeued " << hlos[0]->ToString();
bool group_is_param_gtes = false;
if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
return element == hlos[0];
})) {
if (groups->GetGroupIndex(hlos[0]).has_value()) {
VLOG(1) << "We do not support the case if a shared operand also part "
"of a group: "
<< hlos[0]->ToString();
return fail_and_cleanup();
}
groups->DisallowGroupingOn(hlos[0]);
continue;
}
if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
return element->opcode() == HloOpcode::kGetTupleElement &&
element->operand(0) == body->parameter_instruction(0);
})) {
group_is_param_gtes = true;
} else if (((hlos[0]->IsElementwise() ||
hlos[0]->opcode() == HloOpcode::kAllReduce) &&
!hlos[0]->HasSideEffect()) ||
hlos[0]->opcode() == HloOpcode::kBroadcast ||
hlos[0]->opcode() == HloOpcode::kReduce ||
hlos[0]->opcode() == HloOpcode::kReshape ||
hlos[0]->IsCustomCall("Sharding")) {
if (hlos[0]->opcode() == HloOpcode::kAllReduce &&
(!hlos[0]->shape().IsArray() || hlos[0]->IsCrossModuleAllReduce())) {
VLOG(2) << "Unsupported allreduce: " << hlos[0]->ToString();
return fail_and_cleanup();
}
if (absl::c_any_of(hlos, [&](const HloInstruction* element) {
auto eq_operand = [](const HloInstruction* a,
const HloInstruction* b) {
return ShapeUtil::Compatible(a->shape(), b->shape());
};
auto eq_computations = [](const HloComputation* lhs,
const HloComputation* rhs) {
return lhs->Equal(*rhs, false);
};
if (!hlos[0]->Identical(*element, eq_operand, eq_computations,
false)) {
return true;
}
if (element->opcode() == HloOpcode::kReduce &&
(element->operand_count() != 2 ||
element->operand(1) != hlos[0]->operand(1))) {
return true;
}
return false;
})) {
VLOG(2) << "Different types of elements. First element: "
<< hlos[0]->ToString();
return fail_and_cleanup();
}
int64_t input_count = hlos[0]->operand_count();
if (hlos[0]->opcode() == HloOpcode::kReduce) {
CHECK_EQ(input_count, 2);
input_count = 1;
}
for (int64_t i = 0; i < input_count; ++i) {
std::vector<HloInstruction*> elements(group_size);
for (int64_t j = 0; j < group_size; ++j) {
elements[j] = hlos[j]->mutable_operand(i);
}
auto maybe_new_concat_dim = GetOperandConcatDim(
hlos[0], i, group.concat_dim, group.inserted_concat_dim);
if (!maybe_new_concat_dim.has_value()) {
VLOG(2) << "Cannot find operand concat dimension for operand " << i
<< " of " << hlos[0]->ToString();
return fail_and_cleanup();
}
int64_t new_group_concat_dim = maybe_new_concat_dim->first;
bool inserted_concat_dim = maybe_new_concat_dim->second;
int64_t element_order = -topological_order[elements[0]];
pq.emplace(element_order,
ConcatGroup(std::move(elements), new_group_concat_dim,
inserted_concat_dim));
}
} else if (hlos[0]->opcode() == HloOpcode::kSlice) {
int64_t offset = 0;
auto operand = hlos[0]->operand(0);
if (group.inserted_concat_dim) {
VLOG(2) << "Slices cannot be grouped on new dimension.";
return fail_and_cleanup();
}
if (groups->GetGroupIndex(operand).has_value()) {
return fail_and_cleanup();
}
groups->DisallowGroupingOn(operand);
for (int64_t i = 0; i < group_size; ++i) {
if (hlos[i]->operand(0) != operand) {
VLOG(2) << "Slices of different operands.";
return fail_and_cleanup();
}
for (int64_t j = 0; j < hlos[i]->shape().rank(); ++j) {
if (hlos[i]->slice_strides(j) != 1) {
VLOG(2) << "Slices with strides.";
return fail_and_cleanup();
}
if (j == group.concat_dim) {
if (hlos[i]->slice_starts(j) != offset) {
VLOG(2) << "Slices with unsupported offsets.";
return fail_and_cleanup();
}
offset += hlos[i]->shape().dimensions(j);
} else {
if (hlos[i]->slice_starts(j) != 0 ||
hlos[i]->slice_limits(j) != operand->shape().dimensions(j)) {
VLOG(2) << "Slice with unsupported offsets at dimension " << j
<< ", " << hlos[i]->ToString();
return fail_and_cleanup();
}
}
}
}
if (offset != operand->shape().dimensions(group.concat_dim)) {
VLOG(2) << "Slices with unsupported sizes.";
return fail_and_cleanup();
}
} else {
VLOG(2) << "Unsupported opcode: " << hlos[0]->ToString();
return fail_and_cleanup();
}
auto guse = maybe_create_group(std::move(group));
if (guse.group_id < 0) {
VLOG(2) << "Failed to create group.";
return fail_and_cleanup();
}
const auto& registered_group = groups->GetGroup(guse.group_id);
if (!guse.already_used_by_subcomp && group_is_param_gtes) {
std::vector<HloInstruction*> new_outputs(group_size);
for (int64_t i = 0; i < group_size; ++i) {
new_outputs[i] = root_tuple->mutable_operand(
registered_group.elements[i]->tuple_index());
}
int64_t new_output_order = -topological_order[new_outputs[0]];
pq.emplace(
new_output_order,
ConcatGroup(std::move(new_outputs), registered_group.concat_dim,
registered_group.inserted_concat_dim));
}
}
return groups->Groups().size() > first_group_id_to_create;
}
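// Returns a mask over the loop-state tuple marking elements read by the while
// condition; these must keep their original shape and cannot be grouped.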
std::vector<bool> TupleElementsUsedInCond(HloInstruction* loop) {
std::vector<bool> result(loop->shape().tuple_shapes_size(), false);
for (auto user : loop->while_condition()->parameter_instruction(0)->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
absl::c_fill(result, true);
return result;
}
result[user->tuple_index()] = true;
}
return result;
}
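// Inserts a copy before each array-shaped root-tuple operand so every grouped
// output has its own defining instruction, then forms copy groups mirroring
// the parameter GTE groups so the back edge stays consistent.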
absl::Status AddCopiesToRoot(HloComputation* body,
absl::Span<HloInstruction* const> param_gtes,
ConcatGroups* groups) {
auto root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
std::vector<HloInstruction*> copies(root->operand_count(), nullptr);
for (int64_t i = 0; i < copies.size(); ++i) {
auto element = root->mutable_operand(i);
if (!element->shape().IsArray()) {
continue;
}
copies[i] = body->AddInstruction(HloInstruction::CreateUnary(
element->shape(), HloOpcode::kCopy, element));
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copies[i]));
}
for (int64_t i = 0; i < copies.size(); ++i) {
auto copy = copies[i];
if (groups->GetGroupIndex(copy).has_value()) {
continue;
}
auto param_group_index = groups->GetGroupIndex(param_gtes[i]);
if (!param_group_index.has_value()) {
continue;
}
const auto& param_group = groups->GetGroup(param_group_index->first);
std::vector<HloInstruction*> copy_group(param_group.elements.size());
for (int64_t j = 0; j < copy_group.size(); ++j) {
copy_group[j] = copies[param_group.elements[j]->tuple_index()];
}
CHECK(groups
->MaybeCreateNewGroup(
ConcatGroup(std::move(copy_group), param_group.concat_dim,
param_group.inserted_concat_dim))
.first);
}
return absl::OkStatus();
}
absl::Status RemoveCopiesFromRoot(HloComputation* body) {
auto root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
for (int64_t i = 0; i < root->operand_count(); ++i) {
auto copy = root->mutable_operand(i);
if (copy->opcode() == HloOpcode::kCopy) {
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copy->mutable_operand(0)));
}
}
return absl::OkStatus();
}
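// Applies the rewrite for all groups: the loop state is reshaped so each group
// occupies a single concatenated tuple element, init values are concatenated
// outside the loop, loop outputs are sliced apart for external users, and each
// grouped body instruction is widened to the concat shape, broadcasting shared
// operands along the concat dimension where necessary.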
absl::Status RewriteLoopWithConcatGroups(
HloInstruction* loop, absl::Span<HloInstruction* const> param_gtes,
ConcatGroups& groups) {
VLOG(1) << "RewriteLoopWithConcatGroups with " << groups.Groups().size()
<< " groups.";
absl::flat_hash_set<int64_t> processed_groups;
auto body = loop->while_body();
auto param = body->parameter_instruction(0);
auto cond_param = loop->while_condition()->parameter_instruction(0);
std::vector<HloInstruction*> init_elements(loop->shape().tuple_shapes_size());
for (int64_t i = 0; i < param_gtes.size(); ++i) {
init_elements[i] =
loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
loop->shape().tuple_shapes(i), loop->mutable_operand(0), i));
}
for (int64_t i = 0; i < param_gtes.size(); ++i) {
const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);
if (!group_and_index.has_value() || group_and_index->second != 0) {
continue;
}
const auto& group = groups.GetGroup(group_and_index->first);
*param_gtes[i]->mutable_shape() = group.GetConcatShape();
*param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
*body->root_instruction()->mutable_shape()->mutable_tuple_shapes(i) =
param_gtes[i]->shape();
*cond_param->mutable_shape()->mutable_tuple_shapes(i) =
param_gtes[i]->shape();
*loop->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
processed_groups.insert(group_and_index->first);
std::vector<HloInstruction*> input_concat_elements;
input_concat_elements.reserve(group.elements.size());
for (auto param_gte : group.elements) {
input_concat_elements.push_back(init_elements[param_gte->tuple_index()]);
}
init_elements[i] =
group.CreateConcat(std::move(input_concat_elements), loop->parent());
}
TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(
0, loop->parent()->AddInstruction(
HloInstruction::CreateTuple(init_elements))));
auto original_loop_users = loop->users();
const bool loop_is_root = loop == loop->parent()->root_instruction();
std::vector<HloInstruction*> output_elements(
loop->shape().tuple_shapes_size());
for (int64_t i = 0; i < param_gtes.size(); ++i) {
output_elements[i] =
loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
init_elements[i]->shape(), loop, i));
}
for (int64_t i = 0; i < param_gtes.size(); ++i) {
const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);
if (!group_and_index.has_value() || group_and_index->second != 0) {
continue;
}
const auto& group = groups.GetGroup(group_and_index->first);
auto concat_output = output_elements[group.elements[0]->tuple_index()];
for (int64_t j = 0; j < group.elements.size(); ++j) {
const auto param_gte = group.elements[j];
output_elements[param_gte->tuple_index()] =
group.CreateSlice(concat_output, j, loop->parent());
}
}
auto new_output_tuple = loop->parent()->AddInstruction(
HloInstruction::CreateTuple(output_elements));
for (auto user : original_loop_users) {
TF_RETURN_IF_ERROR(
loop->ReplaceUseWithDifferentShape(user, new_output_tuple));
}
if (loop_is_root) {
loop->parent()->set_root_instruction(new_output_tuple,
true);
}
std::vector<HloInstruction*> slices_to_remove;
absl::flat_hash_set<HloInstruction*> new_reshapes;
for (auto hlo : body->MakeInstructionPostOrder()) {
const auto& group_and_index = groups.GetGroupIndex(hlo);
if (!group_and_index.has_value() || group_and_index->second != 0) {
continue;
}
if (!processed_groups.insert(group_and_index->first).second) {
continue;
}
const auto& group = groups.GetGroup(group_and_index->first);
if (hlo->opcode() == HloOpcode::kSlice) {
slices_to_remove.push_back(hlo);
} else {
int64_t operand_count_to_adjust = hlo->operand_count();
if (hlo->opcode() == HloOpcode::kReduce) {
CHECK_EQ(operand_count_to_adjust, 2);
operand_count_to_adjust = 1;
}
for (int64_t i = 0; i < operand_count_to_adjust; ++i) {
auto operand_group_index = groups.GetGroupIndex(hlo->operand(i));
const ConcatGroup* operand_group =
operand_group_index.has_value()
? &groups.GetGroup(operand_group_index->first)
: nullptr;
auto maybe_operand_concat_dim = GetOperandConcatDim(
hlo, i, group.concat_dim, group.inserted_concat_dim, operand_group);
CHECK(maybe_operand_concat_dim.has_value())
<< "Operand " << i << " of " << hlo->ToString();
int64_t operand_concat_dim = maybe_operand_concat_dim->first;
bool operand_inserted_concat_dim = maybe_operand_concat_dim->second;
if (operand_group != nullptr) {
CHECK_EQ(operand_concat_dim, operand_group->concat_dim);
if (operand_inserted_concat_dim !=
operand_group->inserted_concat_dim) {
std::vector<int64_t> new_dims;
int64_t d = 0;
for (; d < operand_concat_dim; ++d) {
new_dims.push_back(hlo->operand(i)->shape().dimensions(d));
}
if (operand_inserted_concat_dim) {
new_dims.push_back(group.elements.size());
new_dims.push_back(
hlo->operand(i)->shape().dimensions(operand_concat_dim) /
group.elements.size());
d = operand_concat_dim + 1;
} else {
new_dims.push_back(
group.elements.size() *
hlo->operand(i)->shape().dimensions(operand_concat_dim + 1));
d = operand_concat_dim + 2;
}
for (; d < hlo->operand(i)->shape().rank(); ++d) {
new_dims.push_back(hlo->operand(i)->shape().dimensions(d));
}
auto reshape = body->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(hlo->operand(i)->shape().element_type(),
new_dims),
hlo->mutable_operand(i)));
new_reshapes.insert(reshape);
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWithDifferentShape(i, reshape));
}
continue;
}
CHECK(
absl::c_all_of(group.elements, [&](const HloInstruction* element) {
return element->operand(i) == hlo->operand(i);
}));
VLOG(2) << "Broadcasting shared operand "
<< hlo->operand(i)->ToString();
Shape data_shape = hlo->operand(i)->shape();
std::vector<int64_t> broadcast_dims;
std::vector<int64_t> broadcast_shape;
const int64_t data_shape_rank = data_shape.rank();
broadcast_dims.reserve(data_shape_rank);
broadcast_shape.reserve(data_shape_rank + 1);
for (int64_t j = 0; j < data_shape_rank; ++j) {
if (j < operand_concat_dim) {
broadcast_dims.push_back(j);
} else {
broadcast_dims.push_back(j + 1);
}
if (j == operand_concat_dim) {
broadcast_shape.push_back(group.elements.size());
}
broadcast_shape.push_back(data_shape.dimensions(j));
}
if (broadcast_shape.size() == data_shape.rank()) {
broadcast_shape.push_back(group.elements.size());
}
auto broadcast = body->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(data_shape.element_type(), broadcast_shape),
hlo->mutable_operand(i), broadcast_dims));
if (!operand_inserted_concat_dim) {
          data_shape.set_dimensions(
              operand_concat_dim,
              data_shape.dimensions(operand_concat_dim) *
                  group.elements.size());
broadcast = body->AddInstruction(
HloInstruction::CreateReshape(data_shape, broadcast));
}
TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, broadcast));
}
}
VLOG(2) << "Modifying HLO to full shape " << hlo->ToString();
ModifyHloPropertiesForConcatShape(group, hlo);
VLOG(2) << "Modified HLO to full shape " << hlo->ToString();
}
for (auto hlo : body->MakeInstructionPostOrder()) {
if (new_reshapes.contains(hlo)) {
continue;
}
const auto& group_and_index = groups.GetGroupIndex(hlo);
if ((!group_and_index.has_value() || hlo->opcode() == HloOpcode::kReduce) &&
hlo != body->root_instruction()) {
auto operands = hlo->operands();
if (group_and_index.has_value()) {
CHECK_EQ(operands.size(), 2);
CHECK_EQ(hlo->opcode(), HloOpcode::kReduce);
operands.erase(operands.begin());
}
for (int64_t i = 0; i < operands.size(); ++i) {
auto operand = operands[i];
auto operand_group_index = groups.GetGroupIndex(operand);
if (!operand_group_index.has_value()) {
continue;
}
const auto& operand_group = groups.GetGroup(operand_group_index->first);
auto slice = operand_group.CreateSlice(
operand_group.elements[0], operand_group_index->second, body);
TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, slice));
}
}
}
for (auto slice : slices_to_remove) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(slice->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstruction(slice));
}
return absl::OkStatus();
}
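// Runs the optimization on a single while loop: collects concatenates in the
// body with at least min_operand_count_to_optimize operands, attempts to build
// groups from each, and rewrites the loop if any group was formed.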
absl::StatusOr<bool> RunOnLoop(HloInstruction* loop,
int64_t min_operand_count_to_optimize) {
auto body = loop->while_body();
auto param = body->parameter_instruction(0);
auto root = body->root_instruction();
if (!param->shape().IsTuple() || root->opcode() != HloOpcode::kTuple) {
return false;
}
std::vector<HloInstruction*> gtes(param->shape().tuple_shapes_size(),
nullptr);
ConcatGroups groups;
auto indices_used_in_cond = TupleElementsUsedInCond(loop);
for (auto user : param->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
int64_t idx = user->tuple_index();
if (gtes[idx] != nullptr) {
return false;
}
gtes[idx] = user;
if (indices_used_in_cond[idx]) {
groups.DisallowGroupingOn(user);
}
}
std::vector<HloInstruction*> concats;
auto body_instructions = body->MakeInstructionPostOrder();
absl::flat_hash_map<const HloInstruction*, int64_t> topological_order;
for (int64_t i = 0; i < body_instructions.size(); ++i) {
auto hlo = body_instructions[i];
topological_order[hlo] = i;
if (hlo->opcode() == HloOpcode::kConcatenate &&
hlo->operand_count() >= min_operand_count_to_optimize) {
concats.push_back(hlo);
}
}
for (auto& concat : concats) {
if (!GroupHlosForConcat(body, concat, topological_order, &groups)) {
concat = nullptr;
}
}
if (groups.Groups().empty()) {
return false;
}
TF_RETURN_IF_ERROR(AddCopiesToRoot(body, gtes, &groups));
TF_RETURN_IF_ERROR(RewriteLoopWithConcatGroups(loop, gtes, groups));
for (auto concat : concats) {
if (concat == nullptr) {
continue;
}
auto new_slice = concat->mutable_operand(0);
CHECK_EQ(new_slice->opcode(), HloOpcode::kSlice);
TF_RETURN_IF_ERROR(
concat->ReplaceAllUsesWith(new_slice->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstruction(concat));
}
TF_RETURN_IF_ERROR(RemoveCopiesFromRoot(body));
for (auto gte : gtes) {
auto group_index = groups.GetGroupIndex(gte);
if (group_index.has_value() && group_index->second > 0) {
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(gte->tuple_index(), gte));
}
}
return true;
}
}
absl::StatusOr<bool> WhileLoopConcatCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool loop_changed,
RunOnLoop(hlo, min_operand_count_to_optimize_));
changed |= loop_changed;
}
}
}
if (changed) {
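    // The rewrite leaves dead slices, GTEs, and pass-through tuple elements
    // behind; clean them up and let WhileLoopSimplifier shrink the loop state.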
HloPassPipeline pipeline("loop-concat-motion-cleanup");
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
TF_RETURN_IF_ERROR(pipeline.Run(module, execution_threads).status());
}
return changed;
}
} | #include "xla/service/while_loop_concat_code_motion.h"
#include <algorithm>
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class WhileLoopConcatCodeMotionTest : public HloTestBase {};
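// Two f32[1024,1024] loop-carried values feed one concatenate in the body, so
// the pass should hoist the concat out of the loop: the state becomes a single
// f32[2048,1024] element and the original outputs are recovered via slices.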
TEST_F(WhileLoopConcatCodeMotionTest, SimpleMotion) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%ccall2 = f32[1024,1024] custom-call(), custom_call_target="test2"
%add.0 = f32[1024,1024] add(%slice.0, %ccall2)
%add.1 = f32[1024,1024] add(%slice.1, %ccall2)
%t0 = token[] after-all()
%outfeed = token[] outfeed(%slice.1, %t0)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1)))));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
auto while_op =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(while_op->while_body()->root_instruction(),
op::Tuple(op::Add(),
op::Add(op::CustomCall(),
op::Reshape(op::Broadcast(op::CustomCall())))));
}
TEST_F(WhileLoopConcatCodeMotionTest, NoMotionWithChangedElementOrder) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %slice.1, %slice.0)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_FALSE(changed);
}
TEST_F(WhileLoopConcatCodeMotionTest, CascadedConcats) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %gte.3)
%add.1 = f32[1024,1024] add(%slice.1, %gte.4)
%add.2 = f32[1024,1024] add(%gte.3, %gte.3)
%add.3 = f32[1024,1024] add(%gte.4, %gte.4)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %add.2, %add.3)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1))),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsSharedGroups) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
%ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
%slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
%slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %slice.2)
%add.1 = f32[1024,1024] add(%slice.1, %slice.3)
%sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
%sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1))),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsDifferentOrders) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
%ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
%slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
%slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %slice.3)
%add.1 = f32[1024,1024] add(%slice.1, %slice.2)
%sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
%sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(), op::Parameter(0), op::Parameter(1),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::GetTupleElement(loop),
op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, NonElementwiseOps) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%reshape.0 = f32[1,1024,1024] reshape(%gte.1)
%reshape.1 = f32[1,1024,1024] reshape(%gte.2)
%concat = f32[2,1024,1024] concatenate(%reshape.0, %reshape.1), dimensions={0}
%ccall = f32[2,1024,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1,1024,1024] slice(%ccall), slice={[0:1], [0:1024], [0:1024]}
%slice.1 = f32[1,1024,1024] slice(%ccall), slice={[1:2], [0:1024], [0:1024]}
%reshape.2 = f32[1024,1024] reshape(%slice.0 )
%reshape.3 = f32[1024,1024] reshape(%slice.1)
%gte.3 = f32[1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024] get-tuple-element(%param), index=4
%constant.0 = f32[] constant(0)
%reduce.0 = f32[1024] reduce(%reshape.0, %constant.0), to_apply=%sum, dimensions={0,1}
%reduce.1 = f32[1024] reduce(%reshape.1, %constant.0), to_apply=%sum, dimensions={0,1}
%add.0 = f32[1024] add(%reduce.0, %gte.3)
%add.1 = f32[1024] add(%reduce.1, %gte.4)
%br0 = f32[1024,1024] broadcast(%add.0), dimensions={1}
%br1 = f32[1024,1024] broadcast(%add.1), dimensions={1}
%sub.0 = f32[1024,1024] subtract(%reshape.2, %br0)
%sub.1 = f32[1024,1024] subtract(%reshape.3, %br1)
%gte.5 = f32[1] get-tuple-element(%param), index=5
%gte.6 = f32[1] get-tuple-element(%param), index=6
%reshape.4 = f32[] reshape(%gte.5)
%reshape.5 = f32[] reshape(%gte.6)
%br2 = f32[1024] broadcast(%reshape.4), dimensions={}
%br3 = f32[1024] broadcast(%reshape.5), dimensions={}
%add.2 = f32[1024] add(%add.0, %br2)
%add.3 = f32[1024] add(%add.1, %br3)
%inc0 = f32[] add(%constant.0, %reshape.4)
%inc1 = f32[] add(%constant.0, %reshape.5)
%reshape.6 = f32[1] reshape(%inc0)
%reshape.7 = f32[1] reshape(%inc1)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
tuple(%increment_iteration, %sub.0, %sub.1, %add.2, %add.3, %reshape.6, %reshape.7)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024] parameter(2)
%param.3 = f32[1024] parameter(3)
%param.4 = f32[1] parameter(4)
%param.5 = f32[1] parameter(5)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3, %param.4, %param.5)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2,1024,1024]"),
op::Concatenate(op::Reshape(op::Parameter(0)),
op::Reshape(op::Parameter(1)))),
AllOf(op::Shape("f32[2,1024]"),
op::Concatenate(op::Reshape(op::Parameter(2)),
op::Reshape(op::Parameter(3)))),
AllOf(op::Shape("f32[2]"),
op::Concatenate(op::Parameter(4), op::Parameter(5)))));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a90867e2-ebc0-4bfe-a0e5-bb10947c0d3f | cpp | tensorflow/tensorflow | sort_simplifier | third_party/xla/xla/service/sort_simplifier.cc | third_party/xla/xla/service/sort_simplifier_test.cc | #include "xla/service/sort_simplifier.h"
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
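// Shrinks a variadic sort by dropping operands that contribute nothing: an
// operand is kept only if some get-tuple-element user reads its result or
// the comparator uses one of its two scalar parameters. Bails out (returns
// false) for non-tuple sorts, sorts that are the computation root, or sorts
// with non-GTE users. Illustrative effect on HLO:
//   sort = (f32[8], s32[8]) sort(keys, values), to_apply=compare_keys_only
//   gte  = f32[8] get-tuple-element(sort), index=0
// becomes
//   sort = f32[8] sort(keys)
// with the comparator cloned minus the unused value parameters.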
absl::StatusOr<bool> RemoveUnusedOperandFromSort(HloInstruction* sort) {
if (!sort->shape().IsTuple()) {
return false;
}
HloComputation* computation = sort->parent();
if (computation->root_instruction() == sort) {
return false;
}
absl::flat_hash_set<int64_t> used_indices;
for (const HloInstruction* user : sort->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
used_indices.insert(user->tuple_index());
}
auto comparator = sort->to_apply();
for (int64_t i = 0; i < sort->operand_count() * 2; ++i) {
if (comparator->parameter_instruction(i)->user_count() > 0) {
used_indices.insert(i / 2);
}
}
if (used_indices.size() == sort->operand_count()) {
return false;
}
std::vector<HloInstruction*> operands;
std::vector<const Shape*> new_shapes;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
if (used_indices.contains(i)) {
operands.push_back(sort->mutable_operand(i));
new_shapes.push_back(&sort->operand(i)->shape());
}
}
Shape new_sort_shape = new_shapes.size() == 1
? *new_shapes[0]
: ShapeUtil::MakeTupleShapeWithPtrs(new_shapes);
HloInstruction* new_sort = computation->AddInstruction(
sort->CloneWithNewOperands(new_sort_shape, operands));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
int64_t parameter_number = 0;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
auto* old_lhs_parameter = comparator->parameter_instruction(i * 2);
auto* old_rhs_parameter = comparator->parameter_instruction(i * 2 + 1);
if (used_indices.contains(i)) {
Shape scalar_shape =
ShapeUtil::MakeShape(sort->operand(i)->shape().element_type(), {});
replacements[old_lhs_parameter] = HloInstruction::CreateParameter(
parameter_number, scalar_shape,
absl::StrCat("p.", parameter_number / 2, ".lhs"));
++parameter_number;
replacements[old_rhs_parameter] = HloInstruction::CreateParameter(
parameter_number, scalar_shape,
absl::StrCat("p.", parameter_number / 2, ".rhs"));
++parameter_number;
} else {
replacements[old_lhs_parameter] = nullptr;
replacements[old_rhs_parameter] = nullptr;
}
}
HloModule* module = sort->GetModule();
HloComputation* new_compare = module->AddEmbeddedComputation(
comparator->CloneWithReplacements(&replacements));
new_sort->set_to_apply(new_compare);
absl::flat_hash_map<int64_t, HloInstruction*> result_map;
if (new_sort->shape().IsTuple()) {
int64_t new_index = 0;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
if (used_indices.count(i)) {
result_map[i] =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
*new_shapes[new_index], new_sort, new_index));
++new_index;
}
}
} else {
CHECK_EQ(used_indices.size(), 1);
result_map[*used_indices.begin()] = new_sort;
}
std::vector<HloInstruction*> users(sort->users().begin(),
sort->users().end());
for (HloInstruction* user : users) {
TF_RETURN_IF_ERROR(
user->ReplaceAllUsesWith(result_map.at(user->tuple_index())));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(user));
}
return true;
}
}
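// Pass entry point: gathers every kSort in the module's non-fusion
// computations up front (so the rewrites cannot invalidate instruction
// iteration) and then attempts the unused-operand removal on each one.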
absl::StatusOr<bool> SortSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before SortSimplifier:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> sort_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(sort_instrs),
HloPredicateIsOp<HloOpcode::kSort>);
}
for (HloInstruction* sort_instr : sort_instrs) {
TF_ASSIGN_OR_RETURN(bool result, RemoveUnusedOperandFromSort(sort_instr));
changed |= result;
}
if (changed) {
VLOG(2) << "HLO module after SortSimplifier:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after SortSimplifier";
}
return changed;
}
} | #include "xla/service/sort_simplifier.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using SortSimplifierTest = HloTestBase;
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandArrayResult) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
uint64_t num_executions = 0;
do {
num_executions++;
} while (simplifier.Run(module.get()).value());
EXPECT_EQ(num_executions, 2);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(0))));
}
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandTuple) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.2.lhs = u32[] parameter(4)
p.2.rhs = u32[] parameter(5)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,87] parameter(0)
values.0 = s32[64,87] parameter(1)
values.1 = u32[64,87] parameter(2)
sort = (f32[64,87], s32[64,87], u32[64,87]) sort(
keys, values.0, values.1),
dimensions={1}, to_apply=compare
gte.0 = f32[64,87] get-tuple-element(sort), index=0
gte.1 = u32[64,87] get-tuple-element(sort), index=2
ROOT tuple = (f32[64,87], u32[64,87]) tuple(gte.0, gte.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
EXPECT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 0),
m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 1))));
}
TEST_F(SortSimplifierTest, DontRemoveUnusedSortKey) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, to_apply=compare
ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
TEST_F(SortSimplifierTest, RemoveUnusedFirstOperand) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.1.lhs, p.1.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare
ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
uint64_t num_executions = 0;
do {
num_executions++;
} while (simplifier.Run(module.get()).value());
EXPECT_EQ(num_executions, 2);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(1))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8585c3c1-3110-4d7f-9df7-16798a77f5b7 | cpp | tensorflow/tensorflow | stochastic_convert_decomposer | third_party/xla/xla/service/stochastic_convert_decomposer.cc | third_party/xla/xla/service/stochastic_convert_decomposer_test.cc | #include "xla/service/stochastic_convert_decomposer.h"
#include <cstdint>
#include <limits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
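// Lowers stochastic-convert into primitive HLO. For signed integer targets
// the operand is split into sign, integer part, and fraction; the fraction
// is rescaled to the full range of the random operand's bit width and
// compared against the random bits to decide whether to round up, after
// which the sign is reapplied and the result is clamped to the target
// type's [min, max]. With uniform random bits a value rounds up with
// probability equal to its fractional part (e.g. 1.75 -> 2 about 75% of
// the time). Other target types are rejected with an Internal error.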
absl::Status DecomposeStochasticConvert(HloComputation* comp,
HloInstruction* instruction) {
CHECK(instruction->opcode() == HloOpcode::kStochasticConvert)
<< "requires a stochastic_convert instruction to decompose, but got: "
<< instruction->opcode();
CHECK(instruction->operand_count() == 2)
<< "requires 2 operands for stochastic convert, but got: "
<< instruction->operand_count();
HloInstruction* operand = instruction->mutable_operand(0);
HloInstruction* random = instruction->mutable_operand(1);
PrimitiveType from_type = operand->shape().element_type();
PrimitiveType random_type = random->shape().element_type();
PrimitiveType to_type = instruction->shape().element_type();
TF_RETURN_IF_ERROR(ShapeInference::InferStochasticConvertShape(
operand->shape(), random->shape(), to_type)
.status());
VLOG(1) << "Decomposing instruction: " << instruction->ToString();
if (primitive_util::IsSignedIntegralType(to_type)) {
TF_ASSIGN_OR_RETURN(HloInstruction * operand_sign,
MakeUnaryHlo(HloOpcode::kSign, operand));
TF_ASSIGN_OR_RETURN(HloInstruction * should_neg,
MakeCompareHlo(Comparison::Direction::kLt, operand_sign,
MakeScalarLike(operand_sign, 0)));
TF_ASSIGN_OR_RETURN(HloInstruction * operand_abs,
MakeUnaryHlo(HloOpcode::kAbs, operand));
TF_ASSIGN_OR_RETURN(HloInstruction * truncated_fp,
MakeUnaryHlo(HloOpcode::kFloor, operand_abs));
TF_ASSIGN_OR_RETURN(
HloInstruction * fractional,
MakeBinaryHlo(HloOpcode::kSubtract, operand_abs, truncated_fp));
if (from_type == F16) {
fractional = MakeConvertToHlo(fractional, F32);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * fixed_fractional,
MakeBinaryHlo(
HloOpcode::kMultiply, fractional,
MakeScalarLike(fractional, IPow<double>(2, primitive_util::BitWidth(
random_type)))));
TF_ASSIGN_OR_RETURN(
HloInstruction * should_round_up,
MakeCompareHlo(Comparison::Direction::kLt, random,
MakeConvertToHlo(fixed_fractional, random_type)));
HloInstruction* truncated_int = MakeConvertToHlo(truncated_fp, to_type);
TF_ASSIGN_OR_RETURN(
truncated_int,
MakeSelectHlo(should_round_up,
MakeBinaryHlo(HloOpcode::kAdd, truncated_int,
MakeScalarLike(truncated_int, 1))
.value(),
truncated_int));
TF_ASSIGN_OR_RETURN(
HloInstruction * result,
MakeSelectHlo(should_neg,
MakeUnaryHlo(HloOpcode::kNegate, truncated_int).value(),
truncated_int));
auto to_bits = primitive_util::BitWidth(to_type);
auto min = static_cast<int64_t>(
(static_cast<uint64_t>(1) + ~static_cast<uint64_t>(1))
<< (to_bits - 1));
TF_ASSIGN_OR_RETURN(HloInstruction * is_min,
MakeCompareHlo(Comparison::Direction::kLe, operand,
MakeScalarLike(operand, min)));
TF_ASSIGN_OR_RETURN(
result, MakeSelectHlo(is_min, MakeScalarLike(result, min), result));
auto max =
static_cast<int64_t>((static_cast<uint64_t>(1) << (to_bits - 1)) - 1);
TF_ASSIGN_OR_RETURN(HloInstruction * is_max,
MakeCompareHlo(Comparison::Direction::kGe, operand,
MakeScalarLike(operand, max)));
TF_ASSIGN_OR_RETURN(
result, MakeSelectHlo(is_max, MakeScalarLike(result, max), result));
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(comp->RemoveInstruction(instruction));
return absl::OkStatus();
}
return Internal("Unsupported stochastic convert: from %s to %s",
PrimitiveType_Name(from_type),
PrimitiveType_Name(to_type));
}
absl::StatusOr<bool> StochasticConvertDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kStochasticConvert) {
continue;
}
TF_RETURN_IF_ERROR(DecomposeStochasticConvert(computation, instruction));
changed = true;
}
}
return changed;
}
} | #include "xla/service/stochastic_convert_decomposer.h"
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using StochasticConvertDecomposerTest = HloTestBase;
using ::testing::HasSubstr;
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertF32ToS32) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = f32[65536]{0} parameter(0)
%random_param.2 = u32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Negate(),
op::Select()))));
}
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertBF16ToS8) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = bf16[65536]{0} parameter(0)
%random_param.2 = u16[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s8[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u16[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Negate(),
op::Select()))));
}
TEST_F(StochasticConvertDecomposerTest, WrongRandomBitWidth) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = bf16[65536]{0} parameter(0)
%random_param.2 = u32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
auto result = decomposer.Run(module.get());
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(), HasSubstr("have same bits"));
}
TEST_F(StochasticConvertDecomposerTest, WrongRandomType) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = f32[65536]{0} parameter(0)
%random_param.2 = s32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, s32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
auto result = decomposer.Run(module.get());
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("must be unsigned integers"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ebd2d89a-eae6-4d8a-8ae4-35118a5a7120 | cpp | tensorflow/tensorflow | map_inliner | third_party/xla/xla/service/map_inliner.cc | third_party/xla/xla/service/map_inliner_test.cc | #include "xla/service/map_inliner.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
class MapInlinerVisitor : public DfsHloVisitorWithDefault {
public:
explicit MapInlinerVisitor(HloComputation* computation)
: computation_(computation) {}
absl::Status DefaultAction(HloInstruction* /*hlo_instruction*/) override {
return absl::OkStatus();
}
absl::Status HandleMap(HloInstruction* map) override;
absl::StatusOr<bool> Run(HloComputation* computation);
private:
HloComputation* computation_;
bool changed_ = false;
};
absl::StatusOr<bool> MapInlinerVisitor::Run(HloComputation* computation) {
changed_ = false;
computation_ = computation;
TF_RETURN_IF_ERROR(computation->root_instruction()->Accept(this));
return changed_;
}
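// Inlines a kMap whose mapped computation's root reads only parameters.
// Three root shapes are handled: a bare parameter forwards the matching map
// operand; a constant is cloned and broadcast to the map's shape; any other
// op is cloned at the call site with the map's operands substituted in
// parameter order. Fusion roots are deliberately skipped.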
absl::Status MapInlinerVisitor::HandleMap(HloInstruction* map) {
HloComputation* function = map->to_apply();
HloInstruction& root = *function->root_instruction();
if (hlo_query::AllOperandsAreParameters(root)) {
if (root.opcode() == HloOpcode::kFusion) {
return absl::OkStatus();
}
VLOG(10) << "inlining map({X ... Y}, op) => : op(X ... Y) with function "
<< root.ToShortString();
if (root.opcode() == HloOpcode::kParameter) {
TF_RETURN_IF_ERROR(
map->ReplaceAllUsesWith(map->operands()[root.parameter_number()]));
TF_RETURN_IF_ERROR(computation_->RemoveInstruction(map));
} else if (root.opcode() == HloOpcode::kConstant) {
HloInstruction* constant = computation_->AddInstruction(root.Clone());
HloInstruction* placed_instruction = computation_->AddInstruction(
HloInstruction::CreateBroadcast(map->shape(), constant, {}));
TF_RETURN_IF_ERROR(
computation_->ReplaceInstruction(map, placed_instruction));
} else {
std::vector<HloInstruction*> params;
for (int64_t o = 0; o < root.operands().size(); o++) {
params.push_back(map->operands()[root.operand(o)->parameter_number()]);
}
HloInstruction* placed_instruction = computation_->AddInstruction(
root.CloneWithNewOperands(map->shape(), params));
TF_RETURN_IF_ERROR(
computation_->ReplaceInstruction(map, placed_instruction));
}
changed_ = true;
return absl::OkStatus();
}
return absl::OkStatus();
}
absl::StatusOr<bool> MapInliner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
MapInlinerVisitor visitor(nullptr);
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool computation_changed, visitor.Run(computation));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/map_inliner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using MapInlinerTest = HloTestBase;
TEST_F(MapInlinerTest, MapMax) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto max_builder = HloComputation::Builder(TestName());
auto param1 = max_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto param2 = max_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "y"));
max_builder.AddInstruction(HloInstruction::CreateBinary(
param1->shape(), HloOpcode::kMaximum, param1, param2));
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapMaxFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(max_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
op::Maximum(lhs, rhs));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapConstant) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto const2_builder = HloComputation::Builder(TestName());
auto param1 = const2_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
(void)param1;
const2_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto const2_f32 = const2_builder.Build();
auto builder = HloComputation::Builder("MapConstFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(const2_f32));
hlo_module->AddEntryComputation(std::move(computation));
HloInstruction* root = hlo_module->entry_computation()->root_instruction();
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
root = hlo_module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Broadcast(op::Constant()));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapSubtractOppositeOrder) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto max_builder = HloComputation::Builder(TestName());
auto param1 = max_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "x"));
auto param2 = max_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "y"));
max_builder.AddInstruction(HloInstruction::CreateBinary(
param1->shape(), HloOpcode::kSubtract, param1, param2));
auto max_f32 = max_builder.Build();
auto builder = HloComputation::Builder("MapSubFunction");
auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(max_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
op::Subtract(rhs, lhs));
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_F(MapInlinerTest, MapParameter) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto param_builder = HloComputation::Builder(TestName());
param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "p0"));
param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "p1"));
auto param_f32 = param_builder.Build();
auto builder = HloComputation::Builder("MapParamFunction");
auto lhs = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
auto rhs = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4)));
builder.AddInstruction(
HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));
auto computation = builder.Build();
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEmbeddedComputation(std::move(param_f32));
hlo_module->AddEntryComputation(std::move(computation));
MapInliner inliner;
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);
auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
auto expected = LiteralUtil::CreateR0<float>(4);
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f522495c-3a6f-4a50-bfd7-e7546441fa98 | cpp | tensorflow/tensorflow | triangular_solve_expander | third_party/xla/xla/service/triangular_solve_expander.cc | third_party/xla/xla/service/triangular_solve_expander_test.cc | #include "xla/service/triangular_solve_expander.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
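// Extracts the block_size x block_size diagonal blocks of the (batched)
// square matrix `a` and stacks them along a new dimension placed just
// before the two minor (row, column) dimensions. Full blocks are pulled out
// with a single Gather; if n is not a multiple of block_size, the ragged
// final block is zero-padded and completed with an identity in the extra
// rows/columns so that it stays invertible.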
XlaOp DiagonalBlocks(XlaOp a, int64_t block_size) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(a));
int ndims = shape.rank();
int64_t n = ShapeUtil::GetDimension(shape, -1);
int64_t num_blocks = n / block_size;
absl::Span<int64_t const> batch_dims = absl::MakeConstSpan(
shape.dimensions().begin(), shape.dimensions().begin() + (ndims - 2));
XlaOp diag_blocks;
if (n == block_size) {
std::vector<int64_t> permutation(ndims);
std::iota(permutation.begin(), permutation.end(), 1);
permutation.insert(permutation.end() - 2, 0);
return Transpose(Broadcast(a, {1}), permutation);
}
if (n > block_size) {
auto start_indices =
Transpose(Broadcast(Mul(Iota(builder, S32, num_blocks),
ConstantR0<int32_t>(builder, block_size)),
{2}),
{1, 0});
std::vector<int64_t> slice_sizes(ndims);
GatherDimensionNumbers dim_numbers;
for (int i = 0; i < ndims - 2; ++i) {
dim_numbers.add_offset_dims(i);
slice_sizes[i] = ShapeUtil::GetDimension(shape, i);
}
slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size;
dim_numbers.add_offset_dims(ndims - 1);
dim_numbers.add_offset_dims(ndims);
dim_numbers.add_start_index_map(ndims - 2);
dim_numbers.add_start_index_map(ndims - 1);
dim_numbers.set_index_vector_dim(1);
diag_blocks = Gather(a, start_indices, dim_numbers, slice_sizes);
}
if (n % block_size != 0) {
auto last_blocks =
SliceInMinorDims(a, {n - n % block_size, n - n % block_size}, {n, n});
PaddingConfig config = MakeNoPaddingConfig(ndims);
int64_t padding = block_size - n % block_size;
config.mutable_dimensions(ndims - 2)->set_edge_padding_high(padding);
last_blocks =
Pad(last_blocks, Zero(builder, shape.element_type()), config);
auto eye =
IdentityMatrix(builder, shape.element_type(), padding, padding);
config = MakeNoPaddingConfig(2);
config.mutable_dimensions(0)->set_edge_padding_low(n % block_size);
eye = Pad(eye, Zero(builder, shape.element_type()), config);
eye = Broadcast(eye, batch_dims);
last_blocks = ConcatInDim(builder, {last_blocks, eye}, ndims - 1);
TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(last_blocks));
auto shape_dims = blocks_shape.dimensions();
auto last_blocks_dims = std::vector<int64_t>(ndims);
std::copy(shape_dims.begin(), shape_dims.end(), last_blocks_dims.begin());
last_blocks_dims.insert(last_blocks_dims.end() - 2, 1);
last_blocks = Reshape(last_blocks, last_blocks_dims);
if (n > block_size) {
diag_blocks =
ConcatInDim(builder, {diag_blocks, last_blocks}, ndims - 2);
} else {
diag_blocks = last_blocks;
}
}
return diag_blocks;
});
}
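// Performs the blocked substitution given the pre-inverted diagonal blocks.
// Blocks are visited in dependency order (forward or backward depending on
// side/lower/transpose); for each block row the contribution of the part of
// x solved so far is subtracted from the matching slice of b, and the
// inverted diagonal block is applied to that remainder to produce the next
// slab of x, which is concatenated onto the running solution.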
XlaOp SolveWithInvertedDiagonalBlocks(XlaOp a, XlaOp b, XlaOp inv_diag_blocks,
bool left_side, bool lower,
bool transpose_a, bool conjugate_a,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(inv_diag_blocks));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
int64_t block_size = ShapeUtil::GetDimension(blocks_shape, -1);
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
int64_t ndims = a_shape.rank();
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
int64_t num_blocks = n / block_size + (n % block_size != 0);
int64_t m_dim = (left_side) ? -1 : -2;
int64_t m = ShapeUtil::GetDimension(b_shape, m_dim);
std::vector<XlaOp> update_ops;
int bdims = b_shape.rank();
int64_t block_dim = (left_side) ? bdims - 2 : bdims - 1;
XlaOp x;
for (int i = 0; i < num_blocks; i++) {
bool backward = left_side ^ lower ^ transpose_a;
auto j = backward ? num_blocks - 1 - i : i;
int64_t block = (n % block_size != 0 && j + 1 == num_blocks)
? n % block_size
: block_size;
auto inv_block =
MaybeConjugate(Collapse(SliceInMinorDims(inv_diag_blocks, {j, 0, 0},
{j + 1, block, block}),
{ndims - 2, ndims - 1}),
conjugate_a);
int64_t k = std::min((j + 1) * block_size, n);
std::vector<int64_t> start = {j * block_size, 0};
std::vector<int64_t> end = {k, m};
if (!left_side) {
std::swap(start[0], start[1]);
std::swap(end[0], end[1]);
}
auto b_row = SliceInMinorDims(b, start, end);
XlaOp remainder;
if (i == 0) {
remainder = b_row;
} else {
if (backward) {
start = {j * block_size,
std::max(int64_t{0}, (num_blocks - i) * block_size)};
end = {k, n};
} else {
start = {j * block_size, 0};
end = {k, std::min(i * block_size, n)};
}
if (!left_side ^ transpose_a) {
std::swap(start[0], start[1]);
std::swap(end[0], end[1]);
}
auto a_row =
MaybeConjugate(SliceInMinorDims(a, start, end), conjugate_a);
if (left_side) {
remainder = b_row - BatchDot(a_row, transpose_a, x,
                             /*transpose_y=*/false, precision);
} else {
remainder = b_row - BatchDot(x, /*transpose_x=*/false, a_row,
                             transpose_a, precision);
}
}
XlaOp x_update;
if (left_side) {
  x_update = BatchDot(inv_block, transpose_a, remainder,
                      /*transpose_y=*/false, precision);
} else {
  x_update = BatchDot(remainder, /*transpose_x=*/false, inv_block,
                      transpose_a, precision);
}
if (i == 0) {
x = x_update;
} else {
if (backward) {
x = ConcatInDim(builder, {x_update, x}, block_dim);
} else {
x = ConcatInDim(builder, {x, x_update}, block_dim);
}
}
}
return x;
});
}
}
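// Inverts all of the diagonal blocks at once. Each block is first scaled so
// its diagonal is 1; the output is seeded with -I carrying a +1 in the
// starting corner, and a While loop then performs one substitution step per
// row (lower) or per reversed row (upper), batched across every block via a
// batch DotGeneral. The diagonal scaling is divided back out at the end, so
// the sequential cost is O(block_size) regardless of the number of blocks.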
XlaOp TriangularSolveExpander::InvertDiagonalBlocks(
XlaOp diag_blocks, bool lower_triangular,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = diag_blocks.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(diag_blocks));
int64_t block_size = ShapeUtil::GetDimension(shape, -1);
int64_t num_blocks = ShapeUtil::ElementsIn(shape) / IPow(block_size, 2);
diag_blocks = Reshape(diag_blocks, {num_blocks, block_size, block_size});
diag_blocks = Triangle(diag_blocks, lower_triangular);
auto diags = GetMatrixDiagonal(diag_blocks);
auto scaled_diag_blocks =
    Div(diag_blocks, diags, /*broadcast_dimensions=*/{0, 2});
auto identity =
IdentityMatrix(builder, shape.element_type(), block_size, block_size);
auto neg_identity = -identity;
auto pos_one = Reshape(One(builder, shape.element_type()), {1, 1});
auto start_index =
ConstantR0<int>(builder, lower_triangular ? 0 : block_size - 1);
auto output_block = DynamicUpdateSlice(
    neg_identity, pos_one, /*start_indices=*/{start_index, start_index});
XlaOp output = Broadcast(output_block, /*broadcast_sizes=*/{num_blocks});
std::vector<Shape> tuple_shapes = {
ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(shape.element_type(),
{num_blocks, block_size, block_size}),
ShapeUtil::MakeShape(shape.element_type(),
{num_blocks, block_size, block_size})};
Shape tuple_shape = ShapeUtil::MakeTupleShape(tuple_shapes);
auto init_i = One(builder, S32);
auto init = Tuple(builder, {init_i, output, scaled_diag_blocks});
std::unique_ptr<XlaBuilder> condb =
builder->CreateSubBuilder("InvertDiagCond");
{
auto i = GetTupleElement(
Parameter(condb.get(), 0, tuple_shape, "InvertDiagCondTuple"), 0);
Lt(i, ConstantR0<int32_t>(condb.get(), block_size));
}
TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
std::unique_ptr<XlaBuilder> bodyb =
builder->CreateSubBuilder("InvertDiagBody");
{
auto input_tuple =
Parameter(bodyb.get(), 0, tuple_shape, "InvertDiagBodyTuple");
auto i = GetTupleElement(input_tuple, 0);
auto body_out = GetTupleElement(input_tuple, 1);
auto body_input = GetTupleElement(input_tuple, 2);
auto zero = ConstantR0<int32_t>(bodyb.get(), 0);
auto j = lower_triangular ? i : ScalarLike(i, block_size - 1) - i;
auto input_row =
    DynamicSlice(body_input, /*start_indices=*/{zero, j, zero},
                 /*slice_sizes=*/{num_blocks, 1, block_size});
DotDimensionNumbers dnums;
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
PrecisionConfig precision_proto;
precision_proto.add_operand_precision(precision);
precision_proto.add_operand_precision(precision);
auto update = -DotGeneral(input_row, body_out, dnums, &precision_proto);
body_out = DynamicUpdateSlice(body_out, update, {zero, j, zero});
auto next_i = i + ScalarLike(i, 1);
Tuple(bodyb.get(), {next_i, body_out, body_input});
}
TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
auto invert_while = While(cond, body, init);
auto inv_diag_blocks = GetTupleElement(invert_while, 1);
inv_diag_blocks =
    Div(inv_diag_blocks, diags, /*broadcast_dimensions=*/{0, 1});
return Reshape(inv_diag_blocks, shape.dimensions());
});
}
XlaOp TriangularSolveExpander::SolveByInvertingDiagonalBlocks(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t ndims = a_shape.rank();
int64_t k = ShapeUtil::GetDimension(a_shape, -1);
if (unit_diagonal) {
a = lower ? Select(TriangleMask(a, -1), a, ZerosLike(a))
: Select(TriangleMask(a, 0), ZerosLike(a), a);
a = xla::Add(a, IdentityMatrix(builder, a_shape.element_type(), k, k),
{ndims - 2, ndims - 1});
} else {
a = Triangle(a, lower);
}
int64_t block_size = std::min(block_size_, k);
auto diag_blocks = DiagonalBlocks(a, block_size);
auto inv_diag_blocks = InvertDiagonalBlocks(diag_blocks, lower, precision);
return SolveWithInvertedDiagonalBlocks(a, b, inv_diag_blocks, left_side,
lower, transpose_a, conjugate_a,
precision);
});
}
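// Unblocked fallback used for small systems: solves one row (left_side) or
// column of b per step, subtracting the contribution of the already-updated
// part of b times the corresponding slice of `a` and dividing by the
// diagonal entry unless the matrix is unit-diagonal. The solution is built
// up in place in b via UpdateSliceInMinorDims.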
XlaOp TriangularSolveExpander::SolveDirectly(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
int64_t m = ShapeUtil::GetDimension(b_shape, -2);
int64_t n = ShapeUtil::GetDimension(b_shape, -1);
const int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
a = MaybeConjugate(a, conjugate_a);
bool backwards = transpose_a ^ lower ^ !left_side;
for (int64_t i = 0; i < a_size; ++i) {
int64_t j = backwards ? i : (a_size - i - 1);
std::vector<int64_t> b_row_start, b_row_end;
if (left_side) {
b_row_start = {j, 0};
b_row_end = {j + 1, n};
} else {
b_row_start = {0, j};
b_row_end = {m, j + 1};
}
auto b_row = SliceInMinorDims(b, b_row_start, b_row_end);
std::vector<int64_t> a_start = {j, backwards ? 0 : (j + 1)};
std::vector<int64_t> a_end = {j + 1, backwards ? j : a_size};
if (transpose_a ^ !left_side) {
std::swap(a_start[0], a_start[1]);
std::swap(a_end[0], a_end[1]);
}
auto a_chunk = SliceInMinorDims(a, a_start, a_end);
if (left_side) {
bool which = transpose_a ^ lower;
auto b_chunk =
SliceInMinorDims(b, {which ? 0 : (j + 1), 0}, {which ? j : m, n});
b_row = b_row - BatchDot(a_chunk, transpose_a, b_chunk,
                         /*transpose_y=*/false, precision);
} else {
bool which = transpose_a ^ !lower;
auto b_chunk =
SliceInMinorDims(b, {0, which ? 0 : (j + 1)}, {m, which ? j : n});
b_row = b_row - BatchDot(b_chunk, /*transpose_x=*/false, a_chunk,
                         transpose_a, precision);
}
if (!unit_diagonal) {
auto a_diag = SliceInMinorDims(a, {j, j}, {j + 1, j + 1});
b_row = b_row / a_diag;
}
b = UpdateSliceInMinorDims(b, b_row, b_row_start);
}
return b;
});
}
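// Shape validation plus dispatch. Trivial cases are peeled off first (a
// zero-element b is returned unchanged; a 1x1 `a` reduces to a divide),
// then small matrices with large batch counts use the direct row-by-row
// solve when enabled, and everything else goes through the
// diagonal-block-inversion path.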
XlaOp TriangularSolveExpander::BuildTriangularSolve(
XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
bool conjugate_a, bool unit_diagonal, int64_t block_size,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
if (a_shape.rank() != b_shape.rank()) {
return InvalidArgument(
"Arguments to TriangularSolve have shapes with different ranks: "
"%s vs. %s",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
const int64_t ndims = a_shape.rank();
if (ndims < 2) {
return InvalidArgument(
"Arguments to TriangularSolve was rank %d but must have rank >= 2.",
ndims);
}
std::vector<int64_t> batch_dimensions;
int64_t batch = 1;
for (int i = 0; i < ndims - 2; ++i) {
int64_t a_size = a_shape.dimensions(i);
int64_t b_size = b_shape.dimensions(i);
if (a_size != b_size) {
return InvalidArgument(
"Batch dimensions of arguments to TriangularSolve must be equal; "
"shapes were %s and %s.",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
batch_dimensions.push_back(a_size);
batch *= a_size;
}
if (ShapeUtil::GetDimension(a_shape, -1) !=
ShapeUtil::GetDimension(a_shape, -2)) {
return InvalidArgument(
"The 'a' argument to TriangularSolve must be a batched square matrix;"
" shape was: %s",
ShapeUtil::HumanString(a_shape));
}
const int64_t m = ShapeUtil::GetDimension(b_shape, -2);
const int64_t n = ShapeUtil::GetDimension(b_shape, -1);
if ((left_side ? m : n) != ShapeUtil::GetDimension(a_shape, -1)) {
return InvalidArgument(
"Arguments to TriangularSolve have incompatible matrix shapes %s and "
"%s",
ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
}
int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
if (ShapeUtil::IsZeroElementArray(b_shape)) {
return b;
}
if (a_size == 1) {
return unit_diagonal ? b : Div(b, MaybeConjugate(a, conjugate_a));
}
if (UseDirectSolves() && batch > block_size_ / 16 &&
a_size < block_size_ / 4) {
return SolveDirectly(a, b, left_side, lower, transpose_a, conjugate_a,
unit_diagonal, precision);
} else {
return SolveByInvertingDiagonalBlocks(a, b, left_side, lower, transpose_a,
conjugate_a, unit_diagonal,
precision);
}
});
}
TriangularSolveExpander::TriangularSolveExpander(int64_t block_size)
: block_size_(block_size) {
CHECK_GE(block_size_, 1);
}
bool TriangularSolveExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kTriangularSolve;
}
absl::StatusOr<HloInstruction*> TriangularSolveExpander::ExpandInstruction(
HloInstruction* instruction) {
const TriangularSolveOptions& options =
instruction->triangular_solve_options();
const std::string name = absl::StrFormat(
"xla.triangular_solve_%s_%s_%s_%s_%s_%s",
instruction->operand(0)->shape().ToString(),
instruction->operand(1)->shape().ToString(),
options.left_side() ? "left" : "right",
options.lower() ? "lower" : "upper",
TriangularSolveOptions_Transpose_Name(options.transpose_a()),
options.unit_diagonal() ? "unit" : "nonunit");
HloModule* module = instruction->GetModule();
HloComputation*& computation =
computation_cache_.emplace(name, nullptr).first->second;
if (!computation) {
XlaBuilder builder(name);
XlaOp a = Parameter(&builder, 0, instruction->operand(0)->shape(), "a");
XlaOp b = Parameter(&builder, 1, instruction->operand(1)->shape(), "b");
bool transpose_a =
options.transpose_a() != TriangularSolveOptions::NO_TRANSPOSE;
bool conjugate_a = options.transpose_a() == TriangularSolveOptions::ADJOINT;
BuildTriangularSolve(a, b, options.left_side(), options.lower(),
                     transpose_a, conjugate_a, options.unit_diagonal(),
                     /*block_size=*/block_size_,
                     /*precision=*/PrecisionConfig::HIGHEST);
TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, builder.Build());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
xla_computation.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
xla_computation.proto(), config));
HloCloneContext context(module);
computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
}
return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
instruction->shape(), instruction->operands(), computation));
}
} | #include "xla/service/triangular_solve_expander.h"
#include <memory>
#include <utility>
#include "xla/literal.h"
#include "xla/reference_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class TriangularExpanderTest : public HloTestBase,
public ::testing::WithParamInterface<int32_t> {};
TEST_P(TriangularExpanderTest, TestBlockSize) {
auto block_size = GetParam();
std::string hlo_string = R"(
HloModule TensorFlowTriangularSolve
ENTRY main {
a = f32[256,256]{1,0} parameter(0)
b = f32[256,192]{1,0} parameter(1)
ROOT triangular-solve = f32[256,192]{1,0} triangular-solve(a, b),
left_side=true, unit_diagonal=true,
lower=true, transpose_a=NO_TRANSPOSE
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
{
TriangularSolveExpander triangular_solve_expander(block_size);
TF_ASSERT_OK_AND_ASSIGN(
bool result, RunHloPass(&triangular_solve_expander, module.get()));
EXPECT_TRUE(result);
}
Array2D<float> a(256, 256);
for (int64_t row = 0; row < a.dim(0); ++row) {
a(row, row) = 1;
if (row > 0) {
a(row, row - 1) = 0.01;
}
}
Array2D<float> b(256, 192);
const float kMax = static_cast<float>(b.dim(0) * b.dim(1) + 1);
for (int64_t row = 0; row < b.dim(0); ++row) {
for (int64_t col = 0; col < b.dim(1); ++col) {
b(row, col) = static_cast<float>(row + col + 1) / kMax;
}
}
auto la = LiteralUtil::CreateR2FromArray2D(a);
auto lb = LiteralUtil::CreateR2FromArray2D(b);
TF_ASSERT_OK_AND_ASSIGN(Literal lx, Execute(std::move(module), {&la, &lb}));
auto x_shape = lx.shape();
EXPECT_EQ(x_shape.dimensions_size(), 2);
EXPECT_EQ(x_shape.dimensions(0), b.dim(0));
EXPECT_EQ(x_shape.dimensions(1), b.dim(1));
Array2D<float> x(x_shape.dimensions(0), x_shape.dimensions(1));
x.SetValues(lx.data<float>());
auto ref_b = ReferenceUtil::MatmulArray2D(a, x);
auto ref_lb = LiteralUtil::CreateR2FromArray2D(*ref_b);
EXPECT_TRUE(
LiteralTestUtil::NearOrEqual(ref_lb, lb, ErrorSpec{0.001, 0.001}));
}
INSTANTIATE_TEST_CASE_P(TriangularExpanderTestInstances, TriangularExpanderTest,
::testing::Range(2, 256, 7));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
849cb0cf-5da9-4a0b-9798-8ff341a34518 | cpp | tensorflow/tensorflow | transpose_folding | third_party/xla/xla/service/transpose_folding.cc | third_party/xla/xla/service/transpose_folding_test.cc | #include "xla/service/transpose_folding.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
TransposeFolding::OperandIndices CanFoldOperandsIntoConvolution(
const HloInstruction& convolution,
const TransposeFolding::TransposableConvOperandsFn&
transposable_conv_operands) {
if (HloOpcode::kConvolution != convolution.opcode()) {
return {};
}
TransposeFolding::OperandIndices operand_set;
for (int64_t i = 0; i < convolution.operand_count(); ++i) {
auto& operand = *convolution.operand(i);
if (operand.opcode() == HloOpcode::kTranspose) {
operand_set.push_back(i);
}
}
return transposable_conv_operands(convolution, operand_set);
}
bool IsNonIdentityTranspose(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kTranspose) {
for (int dim = 0; dim < instruction->dimensions().size(); ++dim) {
if (dim != instruction->dimensions(dim)) {
return true;
}
}
}
return false;
}
void TransposeDims(tsl::protobuf::RepeatedField<int64_t>& dims,
absl::Span<const int64_t> transpose_dims) {
for (auto& dim : dims) {
dim = transpose_dims[dim];
}
}
using InstructionOperandsPair =
std::pair<HloInstruction*, TransposeFolding::OperandIndices>;
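// Folds transposes feeding a dot by remapping the dot's contracting and
// batch dimension numbers through each transpose's permutation and wiring
// the dot directly to the transpose's input. Illustrative effect (mirrors
// the FoldDotTranspose test in the companion test file):
//   t = f32[3,2] transpose(y), dimensions={1,0}   // y: f32[2,3]
//   d = f32[2,2] dot(x, t), rhs_contracting_dims={0}
// becomes
//   d = f32[2,2] dot(x, y), rhs_contracting_dims={1}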
absl::Status FoldTransposeIntoDot(InstructionOperandsPair& pair) {
HloInstruction* dot = pair.first;
DotDimensionNumbers new_dot_dims = dot->dot_dimension_numbers();
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
for (int64_t operand_index : pair.second) {
if (operand_index == 0) {
TransposeDims(*new_dot_dims.mutable_lhs_contracting_dimensions(),
lhs->dimensions());
TransposeDims(*new_dot_dims.mutable_lhs_batch_dimensions(),
lhs->dimensions());
lhs = lhs->mutable_operand(0);
} else {
CHECK_EQ(operand_index, 1);
TransposeDims(*new_dot_dims.mutable_rhs_contracting_dimensions(),
rhs->dimensions());
TransposeDims(*new_dot_dims.mutable_rhs_batch_dimensions(),
rhs->dimensions());
rhs = rhs->mutable_operand(0);
}
}
return dot->parent()->ReplaceWithNewInstruction(
dot, HloInstruction::CreateDot(dot->shape(), lhs, rhs, new_dot_dims,
dot->precision_config()));
}
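// Convolution analogue: instead of permuting data, the convolution's
// dimension numbers are rewritten -- input batch/feature/spatial dimensions
// for a folded lhs transpose, and kernel input/output feature plus spatial
// dimensions for a folded rhs transpose.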
bool FoldTransposeIntoConvolution(InstructionOperandsPair& pair) {
auto& convolution = *pair.first;
auto& operand_indices = pair.second;
if (operand_indices.empty()) {
return false;
}
const ConvolutionDimensionNumbers& dnums =
convolution.convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dnums = dnums;
HloInstruction* new_lhs;
const int64_t kLhsIdx = 0;
if (absl::c_linear_search(operand_indices, kLhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_input_batch_dimension(
transpose_dimensions[dnums.input_batch_dimension()]);
new_dnums.set_input_feature_dimension(
transpose_dimensions[dnums.input_feature_dimension()]);
for (auto& input_spatial_dimension :
*new_dnums.mutable_input_spatial_dimensions()) {
input_spatial_dimension = transpose_dimensions[input_spatial_dimension];
}
new_lhs = &transpose_operand;
} else {
new_lhs = convolution.mutable_operand(kLhsIdx);
}
HloInstruction* new_rhs;
const int64_t kRhsIdx = 1;
if (absl::c_linear_search(operand_indices, kRhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_kernel_input_feature_dimension(
transpose_dimensions[dnums.kernel_input_feature_dimension()]);
new_dnums.set_kernel_output_feature_dimension(
transpose_dimensions[dnums.kernel_output_feature_dimension()]);
for (auto& kernel_spatial_dimension :
*new_dnums.mutable_kernel_spatial_dimensions()) {
kernel_spatial_dimension = transpose_dimensions[kernel_spatial_dimension];
}
new_rhs = &transpose_operand;
} else {
new_rhs = convolution.mutable_operand(kRhsIdx);
}
auto new_conv = HloInstruction::CreateConvolve(
convolution.shape(), new_lhs, new_rhs, convolution.feature_group_count(),
convolution.batch_group_count(), convolution.window(), new_dnums,
convolution.precision_config());
TF_CHECK_OK(convolution.parent()->ReplaceWithNewInstruction(
&convolution, std::move(new_conv)));
return true;
}
}
TransposeFolding::TransposeFolding(
CanFoldTransposeOperand dot_can_fold_transpose_operand,
TransposableConvOperandsFn transposable_conv_operands)
: dot_can_fold_transpose_operand_(
std::move(dot_can_fold_transpose_operand)),
transposable_conv_operands_(std::move(transposable_conv_operands)) {}
absl::StatusOr<bool> TransposeFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<InstructionOperandsPair> foldable_dots;
std::vector<InstructionOperandsPair> foldable_convolutions;
FunctionVisitor visit_fn([this, &foldable_dots, &foldable_convolutions](
HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kDot) {
if ((instruction->operand(0)->shape().rank() < 2) ||
(instruction->operand(1)->shape().rank() < 2)) {
return absl::OkStatus();
}
OperandIndices operand_indices;
for (int64_t i = 0; i < 2; ++i) {
if (!IsNonIdentityTranspose(instruction->operand(i))) {
continue;
}
TF_ASSIGN_OR_RETURN(bool can_fold_operand,
dot_can_fold_transpose_operand_(*instruction, i));
if (can_fold_operand) {
operand_indices.push_back(i);
}
}
if (!operand_indices.empty()) {
foldable_dots.emplace_back(instruction, operand_indices);
}
}
{
OperandIndices operand_indices = CanFoldOperandsIntoConvolution(
*instruction, transposable_conv_operands_);
if (!operand_indices.empty()) {
foldable_convolutions.emplace_back(instruction, operand_indices);
}
}
return absl::OkStatus();
});
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(comp->Accept(&visit_fn));
}
bool changed = false;
for (InstructionOperandsPair& pair : foldable_dots) {
TF_RETURN_IF_ERROR(FoldTransposeIntoDot(pair));
changed = true;
}
for (InstructionOperandsPair& pair : foldable_convolutions) {
changed |= FoldTransposeIntoConvolution(pair);
}
return changed;
}
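// Default foldability predicate for dots: a transposed operand qualifies
// only when every dimension except two is a batch dimension left fixed by
// the transpose and there is exactly one contracting dimension, i.e. the
// transpose merely swaps the operand's row and column dimensions.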
absl::StatusOr<bool>
TransposeFolding::IsRowColumnTransposeDotOperand(const HloInstruction& dot,
int64_t operand_idx) {
TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
TF_RET_CHECK(dot.operand_count() > operand_idx);
const HloInstruction& transpose = *dot.operand(operand_idx);
TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()
: dot_dims.rhs_batch_dimensions();
auto contracting_dims = (operand_idx == 0)
? dot_dims.lhs_contracting_dimensions()
: dot_dims.rhs_contracting_dimensions();
return (batch_dims.size() == transpose.shape().rank() - 2) &&
(contracting_dims.size() == 1) &&
absl::c_all_of(batch_dims, [&](int64_t dim) {
return transpose.dimensions(dim) == dim;
});
}
} | #include "xla/service/transpose_folding.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::tsl::testing::IsOkAndHolds;
using TransposeFoldingTest = HloTestBase;
TEST_F(TransposeFoldingTest, FoldDotTranspose) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[2,3]{1,0} parameter(0)
y = f32[2,3]{1,0} parameter(1)
transpose = f32[3,2]{1,0} transpose(y), dimensions={1,0}
ROOT dot = f32[2,2]{1,0} dot(x, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfBatchDimByDefault) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[2,3] parameter(0)
y = f32[3,2] parameter(1)
transpose = f32[2,3] transpose(y), dimensions={1,0}
ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldTransposeOfBatchWhenPermitted) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[5,2,3] parameter(0)
y = f32[3,5,4] parameter(1)
transpose = f32[5,3,4] transpose(y), dimensions={1,0,2}
ROOT dot = f32[5,2,4] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TransposeFolding transpose_folding(
[](const HloInstruction&, int64_t) {
return true;
});
EXPECT_THAT(transpose_folding.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/2, /*rhs_contracting_dim=*/0));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfRank1Dot) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[3] parameter(0)
y = f32[3,2] parameter(1)
transpose = f32[2,3] transpose(y), dimensions={1,0}
ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfDotWithoutContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[3,4] parameter(0)
y = f32[3,4,6,7] parameter(1)
transpose = f32[3,4,7,6] transpose(y), dimensions={0,1,3,2}
ROOT dot = f32[3,4,7,6] dot(x, transpose), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={}, rhs_contracting_dims={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldDotTransposeConstant) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeConstant
ENTRY entry_computation {
constant = f32[2,1]{1,0} constant({ { 1 }, { 2 } })
transpose = f32[1,2]{1,0} transpose(constant), dimensions={1,0}
constant.1 = f32[3,2]{1,0} constant({ { 1, 2 }, { 3, 4 }, { 5, 6 } })
transpose.1 = f32[2,3]{1,0} transpose(constant.1), dimensions={1,0}
ROOT dot = f32[1,3]{1,0} dot(transpose, transpose.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Constant(), op::Constant(),
                      /*lhs_contracting_dim=*/0, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
auto builder = HloComputation::Builder("entry");
HloInstruction* const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* const2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* const3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
const1->shape(), HloOpcode::kAdd, const1, const2));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
const2->shape(), HloOpcode::kSubtract, const2, const3));
HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
add->shape(), HloOpcode::kMultiply, add, sub));
auto module = CreateNewVerifiedModule("fuse_with_constant_operands");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(mul));
HloInstruction* call = module->OutlineExpressionFromComputation(
{add, sub, mul}, "entry", entry_computation);
EXPECT_EQ(call, entry_computation->root_instruction());
HloComputation* callee_computation = call->to_apply();
EXPECT_THAT(call->operands(),
::testing::UnorderedElementsAre(const1, const2, const3));
EXPECT_EQ(6, callee_computation->instruction_count());
}
TEST_F(TransposeFoldingTest, FoldDotTransposeInCall) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeInCall
callee {
name.0 = f32[2,3]{1,0} parameter(0)
name.1 = f32[2,3]{1,0} parameter(1)
transpose.clone = f32[3,2]{1,0} transpose(name.0), dimensions={1,0}
ROOT dot.clone = f32[2,2]{1,0} dot(name.1, transpose.clone), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry_computation {
y = f32[2,3]{1,0} parameter(1)
x = f32[2,3]{1,0} parameter(0)
ROOT call = f32[2,2]{1,0} call(y, x), to_apply=callee
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
const HloComputation* callee = module->GetComputationWithName("callee");
ASSERT_NE(callee, nullptr);
EXPECT_THAT(callee->root_instruction(),
op::Dot(op::Parameter(1), op::Parameter(0),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {
auto builder = HloComputation::Builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
"x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
"y"));
HloInstruction* transpose_y =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 0, 2, 3}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(
transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
EXPECT_IS_OK(conv_shape);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
conv_shape.value(), x, transpose_y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
CHECK_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.kernel_input_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_output_feature_dimension());
EXPECT_EQ(dnums.kernel_output_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_input_feature_dimension());
}
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeRhs) {
auto builder = HloComputation::Builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
"x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 2, 1, 3}),
"y"));
HloInstruction* transpose_y =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 3, 0, 2}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(
transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
EXPECT_IS_OK(conv_shape);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
conv_shape.value(), x, transpose_y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
CHECK_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.kernel_input_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_output_feature_dimension());
EXPECT_EQ(dnums.kernel_spatial_dimensions(1),
new_conv->convolution_dimension_numbers()
.kernel_input_feature_dimension());
EXPECT_EQ(
dnums.kernel_output_feature_dimension(),
new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(0));
EXPECT_EQ(
dnums.kernel_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldConvTransposeLhs) {
auto builder = HloComputation::Builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
"x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
"y"));
HloInstruction* transpose_x =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 2, 3}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
EXPECT_IS_OK(conv_shape);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
conv_shape.value(), transpose_x, y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
EXPECT_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.input_feature_dimension(),
new_conv->convolution_dimension_numbers().input_batch_dimension());
EXPECT_EQ(
dnums.input_batch_dimension(),
new_conv->convolution_dimension_numbers().input_feature_dimension());
EXPECT_EQ(
dnums.input_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
EXPECT_EQ(
dnums.input_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
EXPECT_EQ(
dnums.output_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeLhs) {
auto builder = HloComputation::Builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
"x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
"y"));
HloInstruction* transpose_x =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 3, 2}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
EXPECT_IS_OK(conv_shape);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
conv_shape.value(), transpose_x, y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
EXPECT_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.input_feature_dimension(),
new_conv->convolution_dimension_numbers().input_batch_dimension());
EXPECT_EQ(
dnums.input_batch_dimension(),
new_conv->convolution_dimension_numbers().input_feature_dimension());
EXPECT_EQ(
dnums.input_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
EXPECT_EQ(
dnums.input_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldBatchDotTranspose) {
constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTranspose
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,2,3]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,3,2}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeBatch) {
constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeBatch
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,2,3]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={1,0,3,2}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldBatchDotTransposeNonContiguousBatch) {
constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTransposeNonContiguousBatch
ENTRY entry_computation {
x = f32[7,2,7,3]{3,2,1,0} parameter(0)
y = f32[7,2,7,3]{3,2,1,0} parameter(1)
transpose = f32[7,3,7,2]{3,2,1,0} transpose(y), dimensions={0,3,2,1}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={1}, lhs_batch_dims={0,2}, rhs_batch_dims={0,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeIdentity) {
constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeIdentity
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,3,2]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,2,3}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cfec981d-552c-4da4-9a2f-8bcbd2723fba | cpp | tensorflow/tensorflow | reshape_mover | third_party/xla/xla/service/reshape_mover.cc | third_party/xla/xla/service/reshape_mover_test.cc | #include "xla/service/reshape_mover.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool IsRearrange(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kReshape ||
instruction->opcode() == HloOpcode::kTranspose;
}
bool AreEquivalentRearranges(const HloInstruction* a, const HloInstruction* b) {
if (a->opcode() != b->opcode() ||
!ShapeUtil::SameDimensions(a->shape(), b->shape())) {
return false;
}
switch (a->opcode()) {
case HloOpcode::kTranspose:
return a->dimensions() == b->dimensions();
case HloOpcode::kReshape:
return ShapeUtil::SameDimensions(a->operand(0)->shape(),
b->operand(0)->shape());
default:
return false;
}
}
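// Remaps each broadcast dimension through the inverse of `transpose_dims`,
// i.e. the dimensions the broadcast would use on the un-transposed shape.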
absl::InlinedVector<int64_t, 4> TransposedBcastDims(
absl::Span<const int64_t> bcast_dims,
absl::Span<const int64_t> transpose_dims) {
auto inv_perm = InversePermutation(transpose_dims);
absl::InlinedVector<int64_t, 4> new_bcast_dims;
for (int64_t dim : bcast_dims) {
new_bcast_dims.push_back(inv_perm[dim]);
}
return new_bcast_dims;
}
}
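// Returns true when `instr` can absorb `rearrange` (a reshape or transpose
// applied to it) for free: identity rearranges always qualify, constants and
// single-use rngs can simply be regenerated at the new shape, and sorted
// broadcasts qualify when the rearrange is expressible as a broadcast of the
// same (or cheaply reshaped) operand.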
bool ReshapeMover::CanTriviallyRearrange(const HloInstruction* instr,
const HloInstruction* rearrange) {
CHECK(IsRearrange(rearrange)) << rearrange->ToString();
if (rearrange->opcode() == HloOpcode::kReshape &&
ShapeUtil::Equal(rearrange->shape(), rearrange->operand(0)->shape())) {
return true;
}
if (rearrange->opcode() == HloOpcode::kTranspose &&
IsIdentityPermutation(rearrange->dimensions())) {
return true;
}
if (instr->opcode() == HloOpcode::kConstant) {
return true;
}
if (instr->opcode() == HloOpcode::kRng && instr->user_count() == 1) {
return true;
}
if (instr->opcode() == HloOpcode::kBroadcast) {
if (!absl::c_is_sorted(instr->dimensions())) {
return false;
}
if (rearrange->opcode() == HloOpcode::kReshape) {
return ShapeUtil::IsScalar(instr->operand(0)->shape()) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::TrueRank(instr->operand(0)->shape()) <= 1) ||
(options_.reshape_of_1d_broadcast_is_cheap &&
ShapeUtil::ReshapeLeavesDimensionsUnmodified(
rearrange->shape(),
rearrange->operand(0)->shape(),
instr->dimensions())
.has_value());
}
if (rearrange->opcode() == HloOpcode::kTranspose) {
return absl::c_is_sorted(TransposedBcastDims(
instr->dimensions(), InversePermutation(rearrange->dimensions())));
}
}
return false;
}
const HloInstruction* ReshapeMover::FirstNontrivialRearrange(
absl::Span<const HloInstruction* const> instrs) {
auto rearrange_it = absl::c_find_if(instrs, [&](const HloInstruction* instr) {
return IsRearrange(instr) &&
!CanTriviallyRearrange(instr->operand(0), instr);
});
if (rearrange_it == instrs.end()) {
return nullptr;
}
return *rearrange_it;
}
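// An elementwise instruction is a sink candidate when at least one operand
// is a nontrivial reshape/transpose and every operand is either an
// equivalent rearrange of the same kind or something that rearrange can
// pass through trivially.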
bool ReshapeMover::IsReshapeMoveCandidate(HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
VLOG(5) << "** Checking instruction: "
<< instruction->ToString(print_no_metadata);
if (!instruction->IsElementwise()) {
return false;
}
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
if (rearrange == nullptr) {
return false;
}
return absl::c_all_of(
instruction->operands(), [&](const HloInstruction* operand) {
return (IsRearrange(operand) &&
AreEquivalentRearranges(operand, rearrange)) ||
(!IsRearrange(operand) &&
CanTriviallyRearrange(operand, rearrange));
});
}
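// Returns `operand` viewed through the inverse of `rearrange`, so that
// re-applying `rearrange` on top of the sunk elementwise op reproduces the
// original result; identity reshapes/transposes return `operand` unchanged.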
absl::StatusOr<HloInstruction*> ReshapeMover::ApplyInverseRearrange(
const HloInstruction* rearrange, HloInstruction* operand) {
switch (rearrange->opcode()) {
case HloOpcode::kReshape: {
Shape new_shape = ShapeUtil::ChangeElementType(
rearrange->operand(0)->shape(), operand->shape().element_type());
if (operand->shape() != new_shape) {
return MakeReshapeHlo(new_shape, operand);
} else {
return operand;
}
}
case HloOpcode::kTranspose: {
if (!IsIdentityPermutation(rearrange->dimensions())) {
return MakeTransposeHlo(operand,
InversePermutation(rearrange->dimensions()));
} else {
return operand;
}
}
default:
LOG(FATAL) << "Invalid rearrange op: " << rearrange->ToString();
}
}
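// Rewrites elementwise(rearrange(a), rearrange(b), ...) into
// rearrange(elementwise(a', b', ...)): every operand is pushed through the
// inverse rearrange, the elementwise op is cloned at the pre-rearrange
// shape, and a single reshape/transpose is re-applied to the result. Any
// sharding on the original op is dropped from the clone because it may no
// longer be valid for the new shape.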
absl::StatusOr<bool> ReshapeMover::SinkRearrangeOperands(
HloInstruction* instruction) {
auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
HloComputation* computation = instruction->parent();
const HloInstruction* rearrange =
FirstNontrivialRearrange(instruction->operands());
CHECK(rearrange != nullptr);
const Shape& new_operand_shape = rearrange->operand(0)->shape();
VLOG(3) << "** Sinking reshape or transpose: "
<< instruction->ToString(print_no_metadata)
<< "\n\tfirst rearrange operand: "
<< rearrange->ToString(print_no_metadata)
<< "\n\tnew operand shape: "
<< ShapeUtil::HumanString(new_operand_shape);
auto operands = instruction->operands();
for (size_t i = 0; i < operands.size(); ++i) {
VLOG(3) << "Updating operand #" << i << ": "
<< operands[i]->ToString(print_no_metadata);
TF_ASSIGN_OR_RETURN(operands[i],
ApplyInverseRearrange(rearrange, operands[i]));
VLOG(3) << "Updated operand #" << i
<< " to: " << operands[i]->ToString(print_no_metadata);
}
HloInstruction* new_elementwise =
computation->AddInstruction(instruction->CloneWithNewOperands(
ShapeUtil::ChangeElementType(new_operand_shape,
instruction->shape().element_type()),
operands));
std::unique_ptr<HloInstruction> new_rearrange;
switch (rearrange->opcode()) {
case HloOpcode::kReshape:
VLOG(3) << "Creating new reshape for new elementwise op: "
<< new_elementwise->ToString(print_no_metadata);
new_rearrange =
HloInstruction::CreateReshape(instruction->shape(), new_elementwise);
break;
case HloOpcode::kTranspose:
new_rearrange = HloInstruction::CreateTranspose(
instruction->shape(), new_elementwise, rearrange->dimensions());
break;
default:
LOG(FATAL) << "Bad opcode";
}
if (instruction->has_sharding()) {
new_elementwise->clear_sharding();
}
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
instruction, std::move(new_rearrange)));
return true;
}
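// Iteratively shrinks the candidate set before rewriting: if a rearrange
// feeding a candidate also has non-candidate users, sinking would force the
// rearrange to be kept alive anyway, so all of its users are evicted and the
// pruning repeats until stable. Whatever survives is then sunk.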
absl::StatusOr<bool> ReshapeMover::TryReshapeMoveOnCandidates(
HloInstructionSet* candidates) {
bool removed = true;
while (!candidates->empty() && removed) {
if (VLOG_IS_ON(5)) {
for (const HloInstruction* instruction : *candidates) {
VLOG(5) << "candidate " << instruction->ToString();
}
}
ConstHloInstructionSet rearrange_operands;
for (const HloInstruction* instruction : *candidates) {
for (const auto* operand : instruction->operands()) {
if (IsRearrange(operand)) {
rearrange_operands.insert(operand);
}
}
}
removed = false;
for (auto operand : rearrange_operands) {
if (absl::c_any_of(operand->users(), [&](HloInstruction* user) {
return !candidates->count(user);
})) {
for (auto* user : operand->users()) {
removed |= candidates->erase(user) > 0;
}
}
}
}
if (candidates->empty()) {
return false;
}
for (HloInstruction* instruction : *candidates) {
if (!ConsumeFuel("reshape-mover", [&] {
return absl::StrCat("instruction: ", instruction->ToString(),
"\nFull module:\n",
instruction->GetModule()->ToString());
})) {
break;
}
TF_ASSIGN_OR_RETURN(bool did_change, SinkRearrangeOperands(instruction));
CHECK(did_change);
}
return true;
}
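// Per-computation driver: collect all sink candidates first, then attempt
// the group rewrite, so that rearranges shared by several users are only
// sunk when every user cooperates.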
absl::StatusOr<bool> ReshapeMover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
HloInstructionSet candidates;
for (HloInstruction* instruction : comp->instructions()) {
if (IsReshapeMoveCandidate(instruction)) {
candidates.insert(instruction);
}
}
TF_ASSIGN_OR_RETURN(bool did_change,
TryReshapeMoveOnCandidates(&candidates));
changed |= did_change;
}
return changed;
}
} | #include "xla/service/reshape_mover.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = xla::match;
class ReshapeMoverTest : public HloTestBase {
protected:
absl::Status RunPass(HloModule* module, bool change_expected,
ReshapeMoverOptions options = ReshapeMoverOptions{}) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(ReshapeMover(options), module));
SCOPED_TRACE(module->ToString());
EXPECT_EQ(changed, change_expected);
TF_EXPECT_OK(RunHloPass(HloVerifier(HloVerifierOpts()), module).status());
TF_EXPECT_OK(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return absl::OkStatus();
}
};
TEST_F(ReshapeMoverTest, ReshapesWithDifferentInputShapesNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,7,1] parameter(1))
ROOT add = add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneConstantAndOneReshapesOnRngNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
rng = f32[1,8,1,7,1] rng(f32[] constant(0), f32[] constant(1)), distribution=rng_uniform
ROOT add = add(f32[8,7] reshape(rng), f32[8,7] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
reshape1 = f32[8,7] reshape(f32[1,8,1,7] parameter(1))
ROOT add = f32[8,7] add(reshape0, reshape1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelect) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] reshape(f32[6] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkReshapeBelowSelectWithConstant) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = f32[2,3] select(
pred[2,3] reshape(pred[6] parameter(0)),
f32[2,3] reshape(f32[6] parameter(1)),
f32[2,3] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),
m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, OneParameterAndOneReshapeNotMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))
ROOT add = add(reshape0, f32[8,7] parameter(1))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, DontSinkReshapesOfConstants) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT select = select(
pred[3,2] parameter(0),
f32[3,2] reshape(f32[2,3] constant({...})),
f32[3,2] reshape(f32[2,3] constant({...})))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, OneNontrivialReshapeMoved) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
ROOT add = add(
f32[3,2] reshape(f32[2,3] parameter(0)),
f32[3,2] constant({...}))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Parameter(0), m::Reshape(m::Constant())))));
}
TEST_F(ReshapeMoverTest, MultipleReshapes) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
add0 = f32[8,7,1] add(
f32[8,7,1] reshape(f32[1,8,1,7] parameter(0)),
f32[8,7,1] reshape(f32[1,8,1,7] parameter(1)))
ROOT add1 = f32[8,7] add(
f32[8,7] reshape(add0),
f32[8,7] reshape(f32[8,7,1] parameter(2)))
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))),
m::Parameter(2)))));
}
TEST_F(ReshapeMoverTest, SinkTransposeAcrossBroadcastScalar) {
const std::string hlo_string = R"(
HloModule TransposeMulInversedTransposeModule
ENTRY TransposeMulInversedTranspose {
src0 = f32[20,8]{1,0} parameter(0)
transpose0 = f32[8,20]{1,0} transpose(src0), dimensions={1,0}
src1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(src1), dimensions={}
ROOT multiply0 = f32[8,20]{1,0} multiply(transpose0, broadcast0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(m::Multiply(
m::Parameter(0), m::Broadcast(m::Parameter(1))))));
}
TEST_F(ReshapeMoverTest, ReshapeWithUsersOutsideCandidatesNotSink) {
const std::string hlo_string = R"(
HloModule ReshapeWithUsersOutsideCandidates
ENTRY ReshapeWithMultipleUsers {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
param4 = f32[8,20]{1,0} parameter(4)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, param4)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink1) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates1
ENTRY ReshapeWithMultipleUsers1 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
param1 = f32[] parameter(1)
broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}
param2 = f32[20,8]{1,0} parameter(2)
reshape1 = f32[8,20]{1,0} reshape(param2)
param3 = f32[20,8]{1,0} parameter(3)
reshape2 = f32[8,20]{1,0} reshape(param3)
add0 = f32[8,20]{1,0} add(reshape0, broadcast0)
add1 = f32[8,20]{1,0} add(reshape0, reshape1)
add2 = f32[8,20]{1,0} add(reshape1, reshape2)
ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},
f32[8,20]{1,0}) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))),
m::Reshape(m::Add(m::Parameter(0), m::Parameter(2))),
m::Reshape(m::Add(m::Parameter(2), m::Parameter(3))))));
}
TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink2) {
const std::string hlo_string = R"(
HloModule ReshapeNoUsersOutsideCandidates2
ENTRY ReshapeWithMultipleUsers2 {
param0 = f32[20,8]{1,0} parameter(0)
reshape0 = f32[8,20]{1,0} reshape(param0)
ROOT add0 = f32[8,20]{1,0} add(reshape0, reshape0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::Add())));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsNotTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsTrivial) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] reshape(f32[6] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Reshape(m::Broadcast(m::Parameter(0))), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, ReshapeOfRank2BroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[2,3,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
ReshapeMoverOptions options;
options.reshape_of_1d_broadcast_is_cheap = true;
TF_ASSERT_OK(RunPass(m.get(), true, options));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, SinkDisallowedIfReshapeChangesBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,35] reshape(f32[6,5,7] parameter(1))
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, TransposeOfBroadcastIsAllowed) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}
b = f32[2,3] transpose(f32[3,2] parameter(1)), dimensions={1,0}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));
}
TEST_F(ReshapeMoverTest, TransposeReordersBroadcastDims) {
const std::string hlo_string = R"(
HloModule test
ENTRY test {
a = f32[2,3,5] broadcast(f32[2,3] parameter(0)), dimensions={0,1}
b = f32[2,3,5] transpose(f32[3,2,5] parameter(1)), dimensions={1,0,2}
ROOT add0 = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), false));
}
TEST_F(ReshapeMoverTest, ShardingConsistencyPreservation) {
const std::string hlo_string = R"(
HloModule module
ENTRY entry {
copy.2424 = bf16[3,16,128]{2,1,0} parameter(0), sharding={replicated}
dot.987 = bf16[3,16,128,4096]{3,2,1,0} parameter(1), sharding={devices=[1,8,1,1]0,1,2,3,4,5,6,7}
reshape.5843 = bf16[3,16,128,1,4096]{4,3,2,1,0} reshape(dot.987), sharding={devices=[1,8,1,1,1]0,1,2,3,4,5,6,7}
transpose.21172 = bf16[3,1,4096,16,128]{2,1,4,3,0} transpose(reshape.5843), dimensions={0,3,4,1,2}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
reshape.291 = bf16[3,16,128]{2,1,0} reshape(copy.2424), sharding={devices=[1,8,1]0,1,2,3,4,5,6,7}
broadcast.21176 = bf16[3,1,4096,16,128]{4,3,2,1,0} broadcast(reshape.291), dimensions={0,3,4}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
multiply.21177 = bf16[3,1,4096,16,128]{2,1,4,3,0} multiply(transpose.21172, broadcast.21176), sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
ROOT slice.21180 = bf16[1,1,4096,16,128]{4,3,2,1,0} slice(multiply.21177), slice={[1:2], [0:1], [0:4096], [0:16], [0:128]}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(m.get(), true));
auto elementwise_op = FindInstruction(m.get(), HloOpcode::kMultiply);
EXPECT_FALSE(elementwise_op->has_sharding());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cabe3974-40bd-4f00-8029-9f686f21bb85 | cpp | tensorflow/tensorflow | hlo_phi_graph | third_party/xla/xla/service/hlo_phi_graph.cc | third_party/xla/xla/service/hlo_phi_graph_test.cc | #include "xla/service/hlo_phi_graph.h"
#include <queue>
namespace xla {
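// Returns the representative value id that `value` was merged into during
// Optimize(); the node backing `value` must not itself have been folded
// away.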
HloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) {
Node* node = value_id_to_node_[value.id()];
CHECK(!node->mark_as_dead);
return node->value_id;
}
bool PhiGraph::InputsEqualTo(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
auto iter = value_id_to_node_.find(value.id());
CHECK(iter != value_id_to_node_.end());
absl::flat_hash_set<HloValue::Id> existing_set;
for (Node* operand : iter->second->operands) {
existing_set.insert(operand->value_id);
}
absl::flat_hash_set<HloValue::Id> new_set;
for (const HloValue* input : inputs) {
new_set.insert(input->id());
}
return existing_set == new_set;
}
HloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) {
auto iter = value_id_to_node_.find(id);
CHECK(iter != value_id_to_node_.end());
CHECK(!iter->second->mark_as_dead);
return iter->second->value_id;
}
PhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) {
auto iter = value_id_to_node_.find(value.id());
if (iter == value_id_to_node_.end()) {
node_storage_.emplace_back(std::make_unique<Node>());
Node* node = node_storage_.back().get();
node->value_id = value.id();
value_id_to_node_[value.id()] = node;
node_to_value_id_[node].push_back(value.id());
return node;
} else {
CHECK_NE(iter->second, nullptr);
CHECK_EQ(iter->second->value_id, value.id());
return iter->second;
}
}
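// Redirects every user and operand edge of `node` onto `replace`, remaps all
// HloValue ids that resolved to `node`, and marks `node` dead. If `replace`
// was already folded away, the call retries against the live node its value
// id currently maps to.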
void PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) {
CHECK(node->is_phi);
if (node->mark_as_dead) {
return;
}
if (replace->mark_as_dead) {
auto iter = value_id_to_node_.find(replace->value_id);
CHECK(iter != value_id_to_node_.end());
return ReplaceNodeWith(node, iter->second);
}
CHECK(!replace->mark_as_dead);
for (Node* user : node->users) {
absl::c_replace(user->operands, node, replace);
}
for (Node* operand : node->operands) {
absl::c_replace(operand->users, node, replace);
}
for (HloValue::Id value_id : node_to_value_id_[node]) {
CHECK(value_id_to_node_.contains(value_id));
value_id_to_node_[value_id] = replace;
}
absl::c_copy(node_to_value_id_[node],
std::back_inserter(node_to_value_id_[replace]));
node_to_value_id_[node].clear();
node->mark_as_dead = true;
}
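// Declares `value` as a phi over `inputs`, creating graph nodes on demand;
// registering the same value again replaces its previous operand list.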
void PhiGraph::RegisterPhi(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
Node* node = CreateOrReuseNode(value);
CHECK(value.is_phi());
node->is_phi = true;
node->operands.clear();
for (auto input : inputs) {
CHECK(input != nullptr);
Node* input_node = CreateOrReuseNode(*input);
node->operands.push_back(input_node);
}
}
std::string PhiGraph::ToString() {
std::string out = "PhiGraph: \n";
for (auto& node : node_storage_) {
absl::StrAppend(&out, node->value_id);
if (node->is_phi) {
absl::StrAppend(&out, ", phi");
}
    if (node->mark_as_dead) {
      absl::StrAppend(&out, ", dead");
    }
    absl::StrAppend(&out, ":\n");
for (Node* input : node->operands) {
absl::StrAppend(&out, " ", input->value_id, "\n");
}
}
return out;
}
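// Runs two rewrites to a fixed point: (1) after stripping self-edges, a phi
// whose remaining inputs are all the same node collapses into that input;
// (2) a closure of phis whose reachable inputs contain exactly one non-phi
// value collapses onto that value. Example: RegisterPhi(B, {A, A}) leaves
// FindOptimizedValue(B) == A, as exercised by the PhiWithSameInputs test.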
void PhiGraph::Optimize() {
VLOG(2) << "Optimizing phi graph:";
XLA_VLOG_LINES(2, ToString());
for (auto& node : node_storage_) {
for (Node* input : node->operands) {
input->users.push_back(node.get());
}
}
bool changed = true;
while (changed) {
changed = false;
absl::flat_hash_set<Node*> checked_for_closure;
for (auto& node : node_storage_) {
if (!node->is_phi) {
continue;
}
if (node->mark_as_dead) {
continue;
}
Node* node_ptr = node.get();
VLOG(2) << "Optimizing: " << node_ptr->value_id;
CHECK_GE(node_ptr->operands.size(), 1);
auto it = absl::c_find(node_ptr->operands, node_ptr);
while (it != node_ptr->operands.end()) {
node_ptr->operands.erase(it);
it = absl::c_find(node_ptr->operands, node_ptr);
}
it = absl::c_find(node_ptr->users, node_ptr);
while (it != node_ptr->users.end()) {
node_ptr->users.erase(it);
it = absl::c_find(node_ptr->users, node_ptr);
}
CHECK_GE(node_ptr->operands.size(), 1);
bool all_inputs_are_same = absl::c_all_of(
node_ptr->operands,
[&](Node* elem) { return elem == node_ptr->operands[0]; });
if (all_inputs_are_same) {
VLOG(1) << "All inputs to node " << node_ptr->value_id
<< " are the same, replacing it with "
<< node_ptr->operands[0]->value_id;
ReplaceNodeWith(node_ptr, node_ptr->operands[0]);
changed = true;
continue;
}
if (checked_for_closure.contains(node_ptr)) {
continue;
}
absl::flat_hash_set<Node*> workset;
std::queue<Node*> worklist;
Node* non_phi = nullptr;
worklist.push(node_ptr);
while (!worklist.empty()) {
Node* todo = worklist.front();
worklist.pop();
if (workset.contains(todo)) {
continue;
}
checked_for_closure.insert(todo);
workset.insert(todo);
for (Node* operand : todo->operands) {
worklist.push(operand);
}
if (!todo->is_phi) {
if (non_phi != nullptr && non_phi != todo) {
non_phi = nullptr;
break;
} else {
non_phi = todo;
}
}
}
if (non_phi != nullptr) {
for (Node* node : workset) {
if (!node->is_phi) {
CHECK_EQ(node, non_phi);
continue;
}
VLOG(1) << "Replace node " << node->value_id
<< " in the closure with node " << non_phi->value_id;
ReplaceNodeWith(node, non_phi);
changed = true;
}
}
}
}
}
} | #include "xla/service/hlo_phi_graph.h"
#include "xla/literal_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class PhiGraphTest : public ::testing::Test {
protected:
HloValue NewHloValue(bool is_phi) {
static int64_t id = 0;
return HloValue(id++, dummy_inst_.get(), {}, is_phi);
}
void SetUp() override {
dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f));
}
std::unique_ptr<HloInstruction> dummy_inst_;
};
TEST_F(PhiGraphTest, SelfReferencingPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, PhiWithSameInputs) {
PhiGraph phi_graph;
HloValue A = NewHloValue(false);
HloValue B = NewHloValue(true);
phi_graph.RegisterPhi(B, {&A, &A});
phi_graph.Optimize();
EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
TEST_F(PhiGraphTest, CircularPhi) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&D, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
}
TEST_F(PhiGraphTest, NestedPhiReduction) {
PhiGraph phi_graph;
HloValue A = NewHloValue(true);
HloValue B = NewHloValue(true);
HloValue C = NewHloValue(true);
HloValue D = NewHloValue(false);
HloValue E = NewHloValue(true);
phi_graph.RegisterPhi(A, {&B, &C});
phi_graph.RegisterPhi(B, {&E, &C});
phi_graph.RegisterPhi(C, {&A, &B});
phi_graph.RegisterPhi(E, {&D, &D});
phi_graph.Optimize();
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb0c36ad-0dad-4007-b2bb-20fde4559604 | cpp | tensorflow/tensorflow | tuple_util | third_party/xla/xla/service/tuple_util.cc | third_party/xla/xla/service/tuple_util_test.cc | #include "xla/service/tuple_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
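// Builds a new tuple from the first `elements` leaves of `input_tuple`,
// emitting one get-tuple-element per kept element. E.g. for t = (a, b, c)
// and elements = 2 this produces tuple(gte(t, 0), gte(t, 1)).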
HloInstruction* TupleUtil::ExtractPrefix(HloInstruction* input_tuple,
int64_t elements,
absl::string_view name) {
CHECK(input_tuple->shape().IsTuple());
HloComputation* computation = input_tuple->parent();
const Shape& input_shape = input_tuple->shape();
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(elements);
for (int i = 0; i < elements; i++) {
std::string element_name;
if (!name.empty()) {
element_name = absl::StrCat(name, ".element.", i);
}
tuple_elements.push_back(computation->AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape.tuple_shapes(i),
input_tuple, i),
element_name));
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements), name);
}
HloInstruction* TupleUtil::AppendSuffix(
HloInstruction* input_tuple,
absl::Span<HloInstruction* const> trailing_values) {
CHECK(input_tuple->shape().IsTuple());
HloComputation* computation = input_tuple->parent();
const Shape& input_shape = input_tuple->shape();
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(input_shape.tuple_shapes_size());
for (int i = 0; i < input_shape.tuple_shapes_size(); i++) {
tuple_elements.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
input_shape.tuple_shapes(i), input_tuple, i)));
}
tuple_elements.insert(tuple_elements.end(), trailing_values.begin(),
trailing_values.end());
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
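// Returns a tuple identical to `tuple` except that the element at
// `shape_index` is `new_instruction`. Recurses into nested tuples, returns
// `tuple` itself when the replacement is already in place, and, when
// requested, bitcasts a replacement whose shape differs from the slot it
// fills.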
absl::StatusOr<HloInstruction*> TupleUtil::ReplaceTupleWith(
HloInstruction* new_instruction, HloInstruction* tuple,
ShapeIndex shape_index, bool insert_bitcast_if_different_shape) {
const Shape& tuple_shape = tuple->shape();
CHECK(tuple->shape().IsTuple())
<< "ReplaceTupleWith was called for a non-tuple. Tuple = "
<< tuple->ToString()
<< ", new_instruction = " << new_instruction->ToString()
<< ", shape_index = " << shape_index.ToString();
const HloInstruction* instruction = new_instruction;
bool equivalent = true;
for (int i = shape_index.size() - 1; i >= 0; --i) {
int index = shape_index[i];
if (instruction->opcode() != HloOpcode::kGetTupleElement ||
instruction->tuple_index() != index) {
equivalent = false;
break;
}
instruction = instruction->operand(0);
}
if (equivalent && instruction == tuple) {
VLOG(4) << "Instruction " << new_instruction->ToShortString()
<< " already exists at index " << shape_index.ToString() << " of "
<< tuple->ToShortString();
return tuple;
}
HloComputation* computation = new_instruction->parent();
std::vector<HloInstruction*> tuple_args(tuple_shape.tuple_shapes_size());
CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]);
for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) {
const Shape& subshape = tuple_shape.tuple_shapes(i);
auto get_operand = [&]() {
if (tuple->opcode() == HloOpcode::kTuple) {
return tuple->mutable_operand(i);
} else {
return computation->AddInstruction(
HloInstruction::CreateGetTupleElement(subshape, tuple, i));
}
};
if (i == shape_index[0]) {
if (subshape.IsTuple()) {
TF_ASSIGN_OR_RETURN(tuple_args[i],
ReplaceTupleWith(new_instruction, get_operand(),
ShapeIndex(shape_index.begin() + 1,
shape_index.end())));
} else {
if (subshape != new_instruction->shape() &&
insert_bitcast_if_different_shape) {
VLOG(4) << "Old shape = " << subshape.ToString()
<< ", new shape = " << new_instruction->shape().ToString()
<< "; inserting a bitcast.";
new_instruction = computation->AddInstruction(
HloInstruction::CreateBitcast(subshape, new_instruction));
} else if (tuple->opcode() == HloOpcode::kTuple &&
tuple->operand(i) == new_instruction) {
VLOG(4) << "Tuple already contains the new instruction = "
<< new_instruction->ToShortString()
<< " tuple = " << tuple->ToShortString();
return tuple;
}
tuple_args[i] = new_instruction;
}
} else {
tuple_args[i] = get_operand();
}
}
if (shape_index[0] == tuple_shape.tuple_shapes_size()) {
tuple_args.push_back(new_instruction);
}
return computation->AddInstruction(HloInstruction::CreateTuple(tuple_args));
}
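// Materializes get-tuple-element instructions along `position.index`,
// reusing an existing non-root GTE user at each step when one is available,
// and returns the instruction addressing the value at `position`.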
HloInstruction* TupleUtil::AddGetTupleElements(
const HloPosition& position) {
HloInstruction* instruction = position.instruction;
HloComputation* computation = instruction->parent();
for (int64_t index : position.index) {
auto gte_it = absl::c_find_if(
instruction->users(), [index](const HloInstruction* use) {
return use != use->parent()->root_instruction() &&
use->opcode() == HloOpcode::kGetTupleElement &&
use->tuple_index() == index;
});
if (gte_it != instruction->users().end()) {
instruction = *gte_it;
} else {
instruction =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
instruction->shape().tuple_shapes(index), instruction, index));
}
}
return instruction;
}
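// Expands `tuple` into a ShapeTree holding one get-tuple-element per
// subshape (the root slot holds `tuple` itself), naming each new
// instruction "<tuple>.disassembled.<index>". Inverse of
// AssembleTupleInstruction below.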
ShapeTree<HloInstruction*> TupleUtil::DisassembleTupleInstruction(
HloInstruction* tuple) {
const Shape& shape = tuple->shape();
ShapeTree<HloInstruction*> result(shape);
result.ForEachMutableElement([&](ShapeIndexView index,
HloInstruction** element) {
if (index.empty()) {
*element = tuple;
} else {
ShapeIndexView parent_index = index.subspan(0, index.size() - 1);
HloInstruction* parent = result.element(parent_index);
std::string name = absl::StrCat(tuple->name(), ".disassembled.",
absl::StrJoin(index, "."));
*element = tuple->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(parent, index.back()), name);
}
});
return result;
}
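// Rebuilds a single tuple from a ShapeTree of leaf instructions by emitting
// nested CreateTuple instructions bottom-up; interior slots of `elements`
// are overwritten with the newly created tuples, and the root element is
// returned.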
HloInstruction* TupleUtil::AssembleTupleInstruction(
HloComputation* computation, ShapeTree<HloInstruction*> elements,
absl::string_view name) {
elements.ForEachMutableElementPostOrder(
[&](const ShapeIndex& index, HloInstruction** element) {
const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
if (subshape.IsTuple()) {
absl::InlinedVector<HloInstruction*, 2> children;
ShapeIndex child_index = index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(elements.element(child_index));
child_index.pop_back();
}
std::string new_name;
if (!name.empty()) {
if (index.empty()) {
new_name = std::string(name);
} else {
new_name =
absl::StrCat(name, ".assembled.", absl::StrJoin(index, "."));
}
}
*element = computation->AddInstruction(
HloInstruction::CreateTuple(children), new_name);
}
});
return elements.element({});
}
} | #include "xla/service/tuple_util.h"
#include <memory>
#include <string>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using TupleUtilTest = HloTestBase;
TEST_F(TupleUtilTest, ExtractPrefix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* prefix = TupleUtil::ExtractPrefix(param0, 2);
EXPECT_THAT(prefix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, AppendSuffix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* param1 =
module->entry_computation()->parameter_instruction(1);
HloInstruction* with_suffix =
TupleUtil::AppendSuffix(param0, {param1, param1});
EXPECT_THAT(with_suffix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1),
op::GetTupleElement(op::Parameter(0), 2),
op::Parameter(1), op::Parameter(1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p0, tuple, {1}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(0), op::Parameter(0)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {0}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(1),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInstNested) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {1, 0}));
EXPECT_THAT(
new_tuple,
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::Tuple(op::Parameter(1),
op::GetTupleElement(
op::GetTupleElement(op::Parameter(0), 1), 1))));
}
TEST_F(TupleUtilTest, AddGetTupleElements) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
gte = (f32[32,32]{1,0}, f32[32,32]{1,0}) get-tuple-element(p0), index=1
ROOT root = f32[32,32]{1,0} get-tuple-element(gte), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* existing_gte = FindInstruction(module.get(), "gte");
HloInstruction* new_gte = TupleUtil::AddGetTupleElements({p0, {1, 0}});
EXPECT_THAT(new_gte, op::GetTupleElement(existing_gte, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c1e0df2-8f32-4754-b90e-7b6a758a3abc | cpp | tensorflow/tensorflow | all_gather_decomposer | third_party/xla/xla/service/all_gather_decomposer.cc | third_party/xla/xla/service/all_gather_decomposer_test.cc | #include "xla/service/all_gather_decomposer.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
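// Creates a scalar binary reduction computation: logical-or for PRED
// (addition is not defined on booleans), addition for all other types.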
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
HloComputation::Builder sum_b("add");
auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
if (type == PRED) {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));
} else {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));
}
HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());
return reduction;
}
}
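// Lowers one all-gather operand to a dynamic-update-slice of the local shard
// into a zero-filled buffer of the full output shape, followed by an
// all-reduce that sums the per-participant contributions into the gathered
// result.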
HloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand(
CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
int64_t ag_dim) {
std::vector<HloInstruction*> start_indices =
CreateStartIndicesForCollectiveDecomposition(
group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp)
.value();
auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(output_shape.element_type())));
zero = comp->AddInstruction(
HloInstruction::CreateBroadcast(output_shape, zero, {}));
auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
zero->shape(), zero, operand, start_indices));
auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce(
dus->shape(), {dus},
MakeBinaryAdd(dus->shape().element_type(), comp->parent()),
ag.device_list(),
ag.constrain_layout(), ag.channel_id(),
ag.use_global_device_ids()));
return ar;
}
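// Rewrites `ag` using the all-reduce based lowering above and removes it,
// handling both single-operand and tuple-shaped (multi-operand) all-gathers.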
absl::Status AllGatherDecomposer::DecomposeAllGather(
HloAllGatherInstruction* ag, HloComputation* comp) {
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ag->channel_id().has_value(),
ag->use_global_device_ids()));
if (ag->operand_count() > 1) {
std::vector<HloInstruction*> tuple_inputs;
for (int i = 0; i < ag->operand_count(); ++i) {
auto* input_operand = ag->mutable_operand(i);
const auto& output_shape = ag->shape().tuple_shapes(i);
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, output_shape, input_operand, comp,
ag->all_gather_dimension());
tuple_inputs.push_back(ar);
}
auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs));
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup));
} else {
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp,
ag->all_gather_dimension());
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar));
}
TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag));
return absl::OkStatus();
}
absl::StatusOr<bool> AllGatherDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kAllGather) {
continue;
}
auto ag = Cast<HloAllGatherInstruction>(hlo);
if (ShouldDecompose(*ag)) {
TF_RETURN_IF_ERROR(DecomposeAllGather(ag, comp));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/all_gather_decomposer.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllGatherDecomposerTest = HloTestBase;
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0}}, channel_id=1,
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::PartitionId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTrivialGroup) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}},
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto id =
AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId())));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1,
use_global_device_ids=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto global_id =
op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId());
auto id = AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), global_id)));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
param1 = f32[10,16] parameter(1)
ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1),
replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant())))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
88b9adff-15bb-48a6-920b-e030f186c9d9 | cpp | tensorflow/tensorflow | all_reduce_folder | third_party/xla/xla/service/all_reduce_folder.cc | third_party/xla/xla/service/all_reduce_folder_test.cc | #include "xla/service/all_reduce_folder.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
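// Computes the replica groups of the single all-reduce that is equivalent to
// applying the all-reduce with `replica_groups0` followed by the one with
// `replica_groups1`. For every group in `replica_groups1`, the set of
// transitive contributors is the union of the `replica_groups0` groups of
// its members; the fold is valid only if each replica's contributor set is
// consistent, in which case those sets become the new groups. Returns
// std::nullopt if the two all-reduces cannot be folded.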
std::optional<std::vector<ReplicaGroup>> FoldReplicaGroups(
absl::Span<const ReplicaGroup> replica_groups0,
absl::Span<const ReplicaGroup> replica_groups1) {
int64_t num_replicas = 0;
for (const ReplicaGroup &rg : replica_groups0) {
for (int64_t id : rg.replica_ids()) {
num_replicas = std::max(num_replicas, id);
}
}
num_replicas++;
std::vector<int> replica_group_no(num_replicas, -1);
for (int group_no = 0; group_no < replica_groups0.size(); ++group_no) {
for (int64_t id : replica_groups0[group_no].replica_ids()) {
replica_group_no[id] = group_no;
}
}
absl::flat_hash_map<std::vector<bool>, int64_t> contributor_set_id;
std::vector<int64_t> contributing_replicas_set_id(num_replicas, 0);
int64_t next_id = 1;
for (const ReplicaGroup &rg : replica_groups1) {
std::vector<bool> contributors(num_replicas, false);
for (int64_t id : rg.replica_ids()) {
int64_t group_no = replica_group_no[id];
for (int64_t contrib : replica_groups0[group_no].replica_ids()) {
if (contributors[contrib]) {
return std::nullopt;
}
contributors[contrib] = true;
}
}
int64_t set_id;
auto it = contributor_set_id.find(contributors);
if (it != contributor_set_id.end()) {
set_id = it->second;
} else {
set_id = next_id++;
contributor_set_id[contributors] = set_id;
}
for (int64_t id : rg.replica_ids()) {
contributing_replicas_set_id[id] = set_id;
}
}
std::vector<ReplicaGroup> new_replica_groups;
new_replica_groups.reserve(contributor_set_id.size());
for (const auto &it : contributor_set_id) {
const std::vector<bool> &contributors = it.first;
const int64_t set_id = it.second;
new_replica_groups.emplace_back();
ReplicaGroup &group = new_replica_groups.back();
for (int64_t replica = 0; replica < num_replicas; ++replica) {
if (contributors[replica]) {
if (contributing_replicas_set_id[replica] != set_id) {
return std::nullopt;
}
group.add_replica_ids(replica);
}
}
}
absl::c_sort(new_replica_groups,
[](const ReplicaGroup &a, const ReplicaGroup &b) {
return a.replica_ids(0) < b.replica_ids(0);
});
return new_replica_groups;
}
}
absl::StatusOr<bool> AllReduceFolder::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1) << "Skip AllReduceFolder because the module contains all-reduce "
"with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kAllReduce ||
inst->operand(0)->opcode() != HloOpcode::kAllReduce) {
continue;
}
auto *ar0 = Cast<HloAllReduceInstruction>(inst->mutable_operand(0));
auto *ar1 = Cast<HloAllReduceInstruction>(inst);
if (ar0->user_count() != 1) {
continue;
}
std::optional<AllReduceKey> key0 = GetAllReduceKey(
ar0, /*domain_map=*/nullptr, /*ignore_replica_groups=*/true);
std::optional<AllReduceKey> key1 = GetAllReduceKey(
ar1, /*domain_map=*/nullptr, /*ignore_replica_groups=*/true);
if (!key0 || !key1 || *key0 != *key1 || ar0->replica_groups().empty() ||
ar1->replica_groups().empty()) {
continue;
}
std::optional<std::vector<ReplicaGroup>> new_replica_groups =
FoldReplicaGroups(ar0->replica_groups(), ar1->replica_groups());
if (!new_replica_groups) {
continue;
}
std::optional<int64_t> channel_id;
if (ar0->channel_id()) {
channel_id = next_channel_id++;
}
HloInstruction *new_ar =
computation->AddInstruction(HloInstruction::CreateAllReduce(
ar0->shape(), ar0->operands(), ar0->to_apply(),
CollectiveDeviceList(*new_replica_groups),
/*constrain_layout=*/false, channel_id,
ar0->use_global_device_ids()));
TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(new_ar));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_reduce_folder.h"
#include <cstddef>
#include <initializer_list>
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace matcher = xla::testing::opcode_matchers;
using ::testing::HasSubstr;
class AllReduceFolderTest : public HloTestBase {};
const char *k2AllReduce = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups=$group_0, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups=$group_1, to_apply=sum
}
)";
size_t AllReduceCount(HloModule *module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
void ExpectOneAllReduce(HloModule *module,
absl::string_view target_replica_groups) {
EXPECT_EQ(AllReduceCount(module), 1);
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, matcher::AllReduce(matcher::Parameter(0)));
EXPECT_THAT(root->ToString(), HasSubstr(target_replica_groups));
}
TEST_F(AllReduceFolderTest, Simple) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,
{{"$group_0", "{{0,1},{2,3}}"},
{"$group_1", "{{0,2},{1,3}}"}}));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}");
}
TEST_F(AllReduceFolderTest, SimpleSwap) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,
{{"$group_1", "{{0,1},{2,3}}"},
{"$group_0", "{{0,2},{1,3}}"}}));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}");
}
TEST_F(AllReduceFolderTest, BothEmptyReplicaGroups_NotTransformed) {
TF_ASSERT_OK(RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), false,
{{"$group_0", "{}"}, {"$group_1", "{}"}}));
}
TEST_F(AllReduceFolderTest, EmptyReplicaGroups_NotTransformed) {
TF_ASSERT_OK(RunAndCheckHloRewrite(
k2AllReduce, AllReduceFolder(), false,
{{"$group_0", "{}"}, {"$group_1", "{{0,2},{1,3}}"}}));
}
TEST_F(AllReduceFolderTest, MismatchOtherProperties0_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, channel_id=1, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, MismatchOtherProperties1_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
mul {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT mul = f32[] multiply(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=mul
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, NotFoldable_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, Foldable0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,4},{1,5},{2,3},{6,7}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,5},{4,1},{2,7},{3,6}}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,4,5},{2,3,6,7}}");
}
TEST_F(AllReduceFolderTest, FoldableChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=sum
ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum
ROOT ar2 = f32[8] all-reduce(ar1), replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3,4,5,6,7}}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1adb9aee-06f6-4921-bc77-f195f56319b6 | cpp | tensorflow/tensorflow | sharding_remover | third_party/xla/xla/service/sharding_remover.cc | third_party/xla/xla/service/sharding_remover_test.cc | #include "xla/service/sharding_remover.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/spmd/shardy/constants.h"
#include "tsl/platform/errors.h"
namespace xla {
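// Strips sharding-related custom-calls by forwarding their operand to all
// users. "Sharding" ops (and Shardy func-result shardings) are additionally
// replaced with a copy rather than removed outright, so a distinct
// instruction remains at that point in the graph.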
absl::StatusOr<bool> ShardingRemover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
const absl::flat_hash_set<absl::string_view> to_remove_sharding_ops = {
"Sharding", "SPMDShardToFullShape", "SPMDFullToShardShape",
sdy::kFuncResultShardingTargetName};
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
std::reverse(instructions.begin(), instructions.end());
for (HloInstruction* instruction : instructions) {
if (instruction->opcode() != HloOpcode::kCustomCall) {
continue;
}
if (!to_remove_sharding_ops.contains(instruction->custom_call_target())) {
continue;
}
CHECK(instruction->operand_count() == 1)
<< "Sharding instruction must have exactly one operand";
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(
instruction->mutable_operand(0), name()));
changed = true;
if (instruction->custom_call_target() == "Sharding" ||
instruction->custom_call_target() ==
sdy::kFuncResultShardingTargetName) {
auto copy = computation->AddInstruction(
HloInstruction::CreateUnary(instruction->shape(), HloOpcode::kCopy,
instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(instruction, copy));
instruction = copy;
}
}
}
return changed;
}
} | #include "xla/service/sharding_remover.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ShardingRemoverTest = HloTestBase;
TEST_F(ShardingRemoverTest, RemoveSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="Sharding", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
auto parameter = root->operand(0);
EXPECT_EQ(parameter->user_count(), 2);
bool replaced = false;
for (HloInstruction* user : parameter->users()) {
if (user->opcode() == HloOpcode::kCopy) {
replaced = true;
EXPECT_THAT(user, op::Copy(op::Parameter()));
break;
}
}
EXPECT_TRUE(replaced);
}
TEST_F(ShardingRemoverTest, RemoveSPMDShardingToFullShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="SPMDShardToFullShape", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
}
TEST_F(ShardingRemoverTest, RemoveSPMDFullToShardShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.3379 = f32[1,1]{1,0} parameter(0)
%custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),
custom_call_target="SPMDFullToShardShape", sharding={replicated}
ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Reshape(op::Parameter()));
}
TEST_F(ShardingRemoverTest, NoChangeForOtherCustomCall) {
const char* const hlo_string = R"(
HloModule cluster_2013453984438090939__.47
ENTRY %cluster_2013453984438090939__.47
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
custom-call(bf16[2,209664]{1,0} %arg_tuple.1), custom_call_target="TopK"
%get-tuple-element = bf16[2,2000]{1,0}
get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call),
index=0
%get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0},
s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated}
ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0}
%get-tuple-element.1),
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de45acd3-74d9-4eaa-9c40-ea4acfaf2a07 | cpp | tensorflow/tensorflow | tuple_points_to_analysis | third_party/xla/xla/service/tuple_points_to_analysis.cc | third_party/xla/xla/service/tuple_points_to_analysis_test.cc | #include "xla/service/tuple_points_to_analysis.h"
#include <memory>
#include <ostream>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
std::string BufferAlias::ToString() const {
return absl::StrCat("BufferAlias(", instruction_->name(), "[",
absl::StrJoin(index_, ","), "])");
}
std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) {
out << buffer_alias.ToString();
return out;
}
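// A points-to set is ambiguous if some shape index may point to more than one
// logical buffer, and distinct if no buffer appears at two different indices.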
bool PointsToSet::IsAmbiguous() const {
bool ambiguous = false;
ForEachElement(
[&ambiguous](const ShapeIndex& /*index*/, const BufferList& points_to) {
ambiguous |= points_to.size() > 1;
});
return ambiguous;
}
bool PointsToSet::IsDistinct() const {
bool distinct = true;
absl::flat_hash_set<const LogicalBuffer*> all_points_to;
ForEachElement([&](const ShapeIndex& /*index*/, const BufferList& points_to) {
for (auto& buffer : points_to) {
if (all_points_to.contains(buffer)) {
distinct = false;
}
all_points_to.insert(buffer);
}
});
return distinct;
}
size_t PointsToSet::size() const {
return CreateFlattenedSet().size();
}
PointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const {
BufferSet flat_set;
ForEachElement(
[&flat_set](const ShapeIndex& /*index*/, const BufferList& buffers) {
flat_set.insert(buffers.begin(), buffers.end());
});
return flat_set;
}
bool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const {
bool found = false;
ForEachElement([&found, &buffer](const ShapeIndex& /*index*/,
const BufferList& pointed_to_buffers) {
if (!found && absl::c_linear_search(pointed_to_buffers, &buffer)) {
found = true;
}
});
return found;
}
bool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer,
const ShapeIndex& index) const {
const auto& pointed_to_buffers = element(index);
return absl::c_linear_search(pointed_to_buffers, &buffer);
}
void PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer,
const ShapeIndex& index) {
if (ContainsBufferAtIndex(buffer, index)) {
return;
}
mutable_element(index)->push_back(&buffer);
}
const PointsToSet::SourceSet& PointsToSet::tuple_sources(
const ShapeIndex& index) const {
return tree_.element(index).tuple_sources;
}
void PointsToSet::add_tuple_source(const ShapeIndex& index,
HloInstruction* tuple) {
tree_.mutable_element(index)->tuple_sources.insert(tuple);
}
namespace {
void GatherFusionInstructions(
HloInstruction* instruction,
std::vector<HloInstruction*>* fusion_instructions) {
CHECK_EQ(HloOpcode::kFusion, instruction->opcode());
for (auto* fused : instruction->fused_instructions()) {
if (fused->opcode() == HloOpcode::kFusion) {
GatherFusionInstructions(fused, fusion_instructions);
}
}
fusion_instructions->push_back(instruction);
}
}
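// Runs LogicalBufferAnalysis on `module` and then Analyze() to populate the
// per-instruction points-to sets and buffer aliases.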
absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>>
TuplePointsToAnalysis::Run(const HloModule* module) {
auto logical_buffer_analysis = LogicalBufferAnalysis::Run(module);
std::unique_ptr<TuplePointsToAnalysis> analysis(new TuplePointsToAnalysis(
module, std::move(logical_buffer_analysis).value()));
TF_RETURN_IF_ERROR(analysis->Analyze());
return std::move(analysis);
}
absl::Status TuplePointsToAnalysis::Analyze() {
per_instruction_.clear();
per_instruction_.reserve(module_->instruction_count());
logical_buffer_aliases_.clear();
logical_buffer_aliases_.resize(
logical_buffer_analysis_->num_logical_buffers());
std::vector<HloInstruction*> fusion_instructions;
for (auto* computation : module_->MakeNonfusionComputations()) {
TF_RETURN_IF_ERROR(computation->Accept(this));
TF_RETURN_IF_ERROR(
PopulateDefinedBuffersAndAliases(computation->instructions()));
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kFusion) {
GatherFusionInstructions(instruction, &fusion_instructions);
}
}
}
for (auto* instruction : fusion_instructions) {
TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this));
TF_RETURN_IF_ERROR(
PopulateDefinedBuffersAndAliases(instruction->fused_instructions()));
}
XLA_VLOG_LINES(3, ToString());
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases(
const decltype(std::declval<HloComputation>()
.instructions())& instructions) {
for (auto* instruction : instructions) {
PerInstruction* pi = PerInst(instruction);
TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction(
instruction, &pi->instruction_defined_buffers));
const PointsToSet& points_to_set = GetPointsToSet(instruction);
points_to_set.ForEachElement(
[this, &instruction](
const ShapeIndex& index,
const PointsToSet::BufferList& pointed_to_buffers) {
for (const LogicalBuffer* buffer : pointed_to_buffers) {
logical_buffer_aliases_[buffer->id()].emplace_back(instruction,
index);
}
});
}
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::DefaultAction(
HloInstruction* hlo_instruction) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction);
points_to_set.ForEachMutableElement(
[this, hlo_instruction](const ShapeIndex& index,
PointsToSet::BufferList* buffers) {
buffers->push_back(
&logical_buffer_analysis_->GetBuffer(hlo_instruction, index));
});
if (hlo_instruction->shape().IsTuple()) {
points_to_set.add_tuple_source({}, hlo_instruction);
}
return absl::OkStatus();
}
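// A get-tuple-element at tuple_index i points to whatever the i-th subtree of
// its operand points to; tuple sources are forwarded along with the buffers.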
absl::Status TuplePointsToAnalysis::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
int64_t element_index = get_tuple_element->tuple_index();
PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element);
const PointsToSet& operand_points_to_set =
*PerInst(get_tuple_element->operand(0))->points_to_set;
points_to_set.ForEachMutableElement(
[&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) {
ShapeIndex src_index;
src_index.push_back(element_index);
for (auto element : target_index) {
src_index.push_back(element);
}
*points_to = operand_points_to_set.element(src_index);
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
});
return absl::OkStatus();
}
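// A kCopy shares its operand's points-to sets for all nested elements but
// defines a fresh buffer at the top level.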
absl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) {
PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0));
points_to_set.mutable_element({})->clear();
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(copy, /*index=*/{}),
/*index=*/{});
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) {
CreateCopiedPointsToSet(bitcast, bitcast->operand(0));
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) {
CreateCopiedPointsToSet(domain, domain->operand(0));
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleAddDependency(
HloInstruction* add_dependency) {
CreateCopiedPointsToSet(add_dependency, add_dependency->operand(0));
return absl::OkStatus();
}
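// RecvDone defines its top-level tuple buffer and the token buffer at {1};
// the data subtree at {0} aliases the corresponding operand buffers.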
absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done);
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(recv_done, /*index=*/{}),
/*index=*/{});
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(recv_done, /*index=*/{1}),
/*index=*/{1});
const PointsToSet& operand_points_to_set =
GetPointsToSet(recv_done->operand(0));
points_to_set.ForEachMutableElement(
[&points_to_set, &operand_points_to_set](
const ShapeIndex& index, PointsToSet::BufferList* buffers) {
if (index.empty() || index[0] != 0) {
return;
}
*buffers = operand_points_to_set.element(index);
for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) {
points_to_set.add_tuple_source(index, tuple_source);
}
});
return absl::OkStatus();
}
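// AsyncStart output indices of the form {0, i, ...} alias the buffers of
// operand i; every other index defines a fresh buffer.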
absl::Status TuplePointsToAnalysis::HandleAsyncStart(
HloInstruction* async_start) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start);
points_to_set.ForEachMutableElement(
[&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
if (target_index.size() >= 2 && target_index.front() == 0) {
const PointsToSet& operand_points_to_set =
GetPointsToSet(async_start->operand(target_index[1]));
ShapeIndex source_index(target_index.begin() + 2, target_index.end());
*buffers = operand_points_to_set.element(source_index);
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(source_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
} else {
buffers->push_back(
&logical_buffer_analysis_->GetBuffer(async_start, target_index));
}
});
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleAsyncUpdate(
HloInstruction* async_update) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update);
const PointsToSet& operand_points_to_set =
GetPointsToSet(async_update->operand(0));
CHECK_EQ(async_update->shape(), async_update->operand(0)->shape());
points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
PointsToSet::BufferList* buffers) {
*buffers = operand_points_to_set.element(index);
for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) {
points_to_set.add_tuple_source(index, tuple);
}
});
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleAsyncDone(
HloInstruction* async_done) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done);
const PointsToSet& operand_points_to_set =
GetPointsToSet(async_done->operand(0));
operand_points_to_set.ForEachElement(
[&points_to_set, &operand_points_to_set](
const ShapeIndex& src_index,
const PointsToSet::BufferList& points_to) {
if (!src_index.empty() && src_index.front() == 1) {
const ShapeIndex target_index(src_index.begin() + 1, src_index.end());
*points_to_set.mutable_element(target_index) = points_to;
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
}
});
return absl::OkStatus();
}
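// CopyStart defines fresh buffers everywhere except at index {1}, which
// aliases the top-level buffer of the operand being copied.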
absl::Status TuplePointsToAnalysis::HandleCopyStart(
HloInstruction* copy_start) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start);
const PointsToSet& operand_points_to_set =
GetPointsToSet(copy_start->operand(0));
points_to_set.ForEachMutableElement(
[&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
if (target_index == ShapeIndex({1})) {
*buffers = operand_points_to_set.element({});
} else {
buffers->push_back(
&logical_buffer_analysis_->GetBuffer(copy_start, target_index));
}
});
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources({})) {
points_to_set.add_tuple_source({1}, tuple);
}
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done);
const PointsToSet& operand_points_to_set =
GetPointsToSet(copy_done->operand(0));
operand_points_to_set.ForEachElement(
[&points_to_set, &operand_points_to_set](
const ShapeIndex& src_index,
const PointsToSet::BufferList& points_to) {
if (src_index == ShapeIndex({0})) {
const ShapeIndex target_index = {};
*points_to_set.mutable_element(target_index) = points_to;
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
}
});
return absl::OkStatus();
}
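// Send defines its top-level tuple buffer plus the context ({1}) and token
// ({2}) buffers; the {0} subtree aliases the operand's buffers.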
absl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(send);
auto top_buffer = points_to_set.mutable_element(ShapeIndex({}));
top_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({})));
points_to_set.add_tuple_source({}, send);
auto context_buffer = points_to_set.mutable_element(ShapeIndex({1}));
context_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1})));
auto token_buffer = points_to_set.mutable_element(ShapeIndex({2}));
token_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2})));
const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0));
operand_points_to_set.ForEachElement(
[&points_to_set, &operand_points_to_set](
const ShapeIndex& src_index,
const PointsToSet::BufferList& points_to) {
ShapeIndex target_index({0});
for (auto element : src_index) {
target_index.push_back(element);
}
*points_to_set.mutable_element(target_index) = points_to;
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
});
return absl::OkStatus();
}
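// A tuple defines only its top-level buffer; the subtree at {i, ...} aliases
// the points-to set of operand i.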
absl::Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) {
absl::Span<HloInstruction* const> operands(tuple->operands());
PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple);
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(tuple, /*index=*/{}),
/*index=*/{});
for (int64_t i = 0; i < operands.size(); ++i) {
const PointsToSet& operand_points_to_set =
*PerInst(operands[i])->points_to_set;
operand_points_to_set.ForEachElement(
[&points_to_set, &operand_points_to_set, i](
const ShapeIndex& src_index,
const PointsToSet::BufferList& points_to) {
ShapeIndex target_index;
target_index.push_back(i);
for (auto element : src_index) {
target_index.push_back(element);
}
*points_to_set.mutable_element(target_index) = points_to;
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) {
points_to_set.add_tuple_source(target_index, tuple);
}
});
}
points_to_set.add_tuple_source({}, tuple);
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleCustomCall(
HloInstruction* custom_call) {
auto ccall = Cast<HloCustomCallInstruction>(custom_call);
PointsToSet& points_to_set = CreateEmptyPointsToSet(custom_call);
absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>>
aliased_outputs;
for (const auto& pair : ccall->output_to_operand_aliasing()) {
aliased_outputs.emplace(pair.first, pair.second);
}
points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
PointsToSet::BufferList* buffers) {
auto it = aliased_outputs.find(index);
if (it == aliased_outputs.end() || !alias_buffer_across_dataflow_) {
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(custom_call, index), index);
} else {
const PointsToSet& input_set =
*PerInst(ccall->operand(it->second.first))->points_to_set;
for (const LogicalBuffer* input_buffer :
input_set.element(it->second.second)) {
points_to_set.AddPointedToBuffer(*input_buffer, index);
}
for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {
points_to_set.add_tuple_source(index, tuple);
}
}
});
points_to_set.add_tuple_source({}, custom_call);
return absl::OkStatus();
}
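// Fusion outputs define fresh buffers except at indices covered by the
// fusion's output-to-operand aliasing, which point at the aliased operand's
// buffers instead.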
absl::Status TuplePointsToAnalysis::HandleFusion(HloInstruction* fusion) {
auto cfusion = Cast<HloFusionInstruction>(fusion);
PointsToSet& points_to_set = CreateEmptyPointsToSet(fusion);
absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>>
aliased_outputs;
for (const auto& pair : cfusion->output_to_operand_aliasing()) {
aliased_outputs.emplace(pair.first, pair.second);
}
points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
PointsToSet::BufferList* buffers) {
auto it = aliased_outputs.find(index);
if (it == aliased_outputs.end()) {
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(fusion, index), index);
} else {
const PointsToSet& input_set =
*PerInst(cfusion->operand(it->second.first))->points_to_set;
for (const LogicalBuffer* input_buffer :
input_set.element(it->second.second)) {
points_to_set.AddPointedToBuffer(*input_buffer, index);
}
for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {
points_to_set.add_tuple_source(index, tuple);
}
}
});
points_to_set.add_tuple_source({}, fusion);
return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleOptimizationBarrier(
HloInstruction* barrier) {
CreateCopiedPointsToSet(barrier, barrier->operand(0));
return absl::OkStatus();
}
const PointsToSet& TuplePointsToAnalysis::GetPointsToSet(
const HloInstruction* hlo_instruction) const {
return *PerInst(hlo_instruction)->points_to_set;
}
PointsToSet& TuplePointsToAnalysis::CreateEmptyPointsToSet(
const HloInstruction* instruction) {
PerInstruction* pi = PerInst(instruction);
CHECK(pi->points_to_set == nullptr)
<< "instruction should not have been present in the map.";
auto set = std::make_unique<PointsToSet>(&instruction->shape());
pi->points_to_set = std::move(set);
return *pi->points_to_set;
}
bool TuplePointsToAnalysis::InstructionDefinesBufferAtIndex(
const HloInstruction* instruction, const ShapeIndex& index) const {
const auto& buffers = GetPointsToSet(instruction).element(index);
return (buffers.size() == 1 && buffers[0]->instruction() == instruction);
}
absl::Status TuplePointsToAnalysis::VerifyBuffer(
const LogicalBuffer& buffer) const {
if (!InstructionDefinesBufferAtIndex(buffer.instruction(), buffer.index())) {
return FailedPrecondition(
"LogicalBuffer %s is ill-defined: instruction %s does not define a "
"buffer at that index",
buffer.ToString(), buffer.instruction()->name());
}
if (buffer.id() < 0 ||
buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) {
return FailedPrecondition("LogicalBuffer %s is ill-defined: invalid id %d",
buffer.ToString(), buffer.id());
}
if (GetBuffer(buffer.id()).instruction() != buffer.instruction() ||
GetBuffer(buffer.id()).index() != buffer.index()) {
return FailedPrecondition(
"LogicalBuffer %s is ill-defined: buffer with same id differs: %s",
buffer.ToString(), GetBuffer(buffer.id()).ToString());
}
return absl::OkStatus();
}
const LogicalBuffer& TuplePointsToAnalysis::GetBuffer(
LogicalBuffer::Id id) const {
CHECK_GE(id, 0);
CHECK_LT(id, logical_buffer_analysis_->num_logical_buffers());
return logical_buffer_analysis_->GetBuffer(id);
}
absl::StatusOr<const LogicalBuffer*> TuplePointsToAnalysis::GetBufferDefinedAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
const auto& buffers = GetPointsToSet(instruction).element(index);
if (buffers.size() != 1 || buffers[0]->instruction() != instruction) {
return FailedPrecondition(
"instruction %s does not define buffer at index {%s}",
instruction->name(), absl::StrJoin(index, ","));
}
return buffers[0];
}
const TuplePointsToAnalysis::BufferAliasVector&
TuplePointsToAnalysis::GetBufferAliases(const LogicalBuffer& buffer) const {
return logical_buffer_aliases_[buffer.id()];
}
const TuplePointsToAnalysis::BufferDefinitionVector&
TuplePointsToAnalysis::GetBuffersDefinedByInstruction(
const HloInstruction* instruction) const {
return PerInst(instruction)->instruction_defined_buffers;
}
absl::Status TuplePointsToAnalysis::GatherBuffersDefinedByInstruction(
const HloInstruction* instruction,
TuplePointsToAnalysis::BufferDefinitionVector* buffers) {
GetPointsToSet(instruction)
.ForEachElement([buffers, instruction](
const ShapeIndex& index,
const PointsToSet::BufferList& source_buffers) {
CHECK(!source_buffers.empty());
if (source_buffers.size() == 1 &&
source_buffers[0]->instruction() == instruction) {
DCHECK(source_buffers[0]->index() == index);
buffers->push_back(source_buffers[0]);
} else {
for (const LogicalBuffer* source_buffer : source_buffers) {
DCHECK(source_buffer->instruction() != instruction);
}
}
});
return absl::OkStatus();
}
PointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet(
const HloInstruction* instruction, const HloInstruction* src) {
PointsToSet& dst_points_to_set = CreateEmptyPointsToSet(instruction);
const PointsToSet& src_points_to_set = GetPointsToSet(src);
dst_points_to_set.ForEachMutableElement(
[&dst_points_to_set, &src_points_to_set](
const ShapeIndex& index, PointsToSet::BufferList* buffers) {
*buffers = src_points_to_set.element(index);
for (auto& tuple_source : src_points_to_set.tuple_sources(index)) {
dst_points_to_set.add_tuple_source(index, tuple_source);
}
});
return *PerInst(instruction)->points_to_set;
}
std::string TuplePointsToAnalysis::ToString() const {
std::string output =
absl::StrFormat("TuplePointsToSet for module %s:\n", module_->name());
for (const auto* computation : module_->MakeNonfusionComputations()) {
const char* entry =
computation == module_->entry_computation() ? "entry " : "";
absl::StrAppend(&output, entry, "computation ", computation->name(), ":\n");
for (const HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
InstructionToString(instruction, &output);
if (instruction->opcode() == HloOpcode::kFusion) {
for (auto* fused : instruction->fused_instructions()) {
InstructionToString(fused, &output);
}
}
}
}
absl::StrAppend(&output, "LogicalBuffers:\n");
for (const auto& b : logical_buffer_analysis_->logical_buffers()) {
absl::StrAppend(&output, " buffer ", b->ToString(), ":\n");
for (const BufferAlias& alias : logical_buffer_aliases_[b->id()]) {
absl::StrAppend(&output, " alias ", alias.ToString(), "\n");
}
}
return output;
}
void TuplePointsToAnalysis::InstructionToString(
const HloInstruction* instruction, std::string* output) const {
const std::string prefix = instruction->IsFused() ? " " : "";
absl::StrAppend(output, prefix, " instruction ",
instruction->ToShortString(), ":\n");
const PointsToSet& points_to_set = GetPointsToSet(instruction);
points_to_set.ForEachElement(
[&prefix, &output](const ShapeIndex& index,
const PointsToSet::BufferList& points_to) {
absl::StrAppend(
output, prefix, " {", absl::StrJoin(index, ","), "}: ",
absl::StrJoin(points_to, ", ",
[](std::string* out, const LogicalBuffer* source) {
out->append(source->ToString());
}),
"\n");
});
}
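// Returns true if `user` cannot read the contents of `operand`'s buffer at
// `index`: either `user` is a get-tuple-element and `index` is non-empty (a
// GTE only reads the top-level index table), or it is a loop fusion whose
// fused uses of the matching parameter all ignore that buffer.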
bool TuplePointsToAnalysis::DoesNotUseOperandBuffer(
const HloInstruction* operand, const ShapeIndex& index,
const HloInstruction* user) const {
CHECK(user->IsUserOf(operand))
<< "user: " << user->ToString() << " operand: " << operand->ToString();
if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {
return true;
} else if (user->IsLoopFusion()) {
auto it = absl::c_find_if(
user->fused_parameters(), [&](HloInstruction* fused_param) {
return user->operand(fused_param->parameter_number()) == operand;
});
CHECK(it != user->fused_parameters().end());
const LogicalBuffer* buffer = GetBufferDefinedAt(*it, index).value();
for (const BufferAlias& alias : GetBufferAliases(*buffer)) {
for (HloInstruction* alias_user : alias.instruction()->users()) {
if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),
alias_user)) {
continue;
}
return false;
}
}
return true;
}
return false;
}
std::vector<std::pair<HloInstruction*, int64_t>>
TuplePointsToAnalysis::GetAllUsesOfInstructionAtIndex(
HloInstruction* instruction, const ShapeIndex& index) const {
std::vector<std::pair<HloInstruction*, int64_t>> uses;
const PointsToSet::BufferList& points_to =
GetPointsToSet(instruction).element(index);
for (const LogicalBuffer* buffer : points_to) {
for (const BufferAlias& alias : GetBufferAliases(*buffer)) {
for (HloInstruction* alias_user : alias.instruction()->users()) {
if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),
alias_user)) {
continue;
}
for (int64_t op_idx : alias_user->OperandIndices(alias.instruction())) {
uses.emplace_back(alias_user, op_idx);
}
}
}
}
return uses;
}
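// Returns true if the only use of `operand` at `operand_index` inside
// `fusion` is by the fused expression root, at operand slot
// `use_operand_index`.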
bool TuplePointsToAnalysis::HasUniqueFusedUseOfOperandAt(
HloInstruction* operand, const ShapeIndex& operand_index,
HloInstruction* fusion, const int64_t use_operand_index) const {
CHECK_EQ(HloOpcode::kFusion, fusion->opcode());
if (fusion->OperandIndices(operand).size() > 1) {
return false;
}
const auto& fused_params = fusion->fused_parameters();
auto fused_param_it =
absl::c_find_if(fused_params, [&](HloInstruction* fused_param) {
return fusion->operand(fused_param->parameter_number()) == operand;
});
if (fused_param_it == fused_params.end()) {
return false;
}
auto* fused_param = *fused_param_it;
auto fused_param_uses =
GetAllUsesOfInstructionAtIndex(fused_param, operand_index);
return fused_param_uses.size() == 1 &&
fused_param_uses[0].first == fusion->fused_expression_root() &&
fused_param_uses[0].second == use_operand_index;
}
} | #include "xla/service/tuple_points_to_analysis.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
class TuplePointsToAnalysisTest : public HloTestBase {
protected:
void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) {
BuildModule(std::move(computation));
RunAnalysis();
}
void BuildModule(std::unique_ptr<HloComputation> computation) {
module_ = CreateNewVerifiedModule();
module_->AddEntryComputation(std::move(computation));
}
void RunAnalysis() {
CHECK_NOTNULL(module_.get());
points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();
}
const LogicalBuffer* GetBuffer(const HloInstruction* instruction,
const ShapeIndex& index) {
const auto& pointed_to =
points_to_analysis_->GetPointsToSet(instruction).element(index);
CHECK_EQ(1, pointed_to.size());
CHECK_EQ(instruction, pointed_to[0]->instruction());
CHECK(index == pointed_to[0]->index());
return pointed_to[0];
}
void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set,
absl::Span<const LogicalBuffer* const> buffers) {
std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end());
EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec));
}
void ExpectHasTopLevelBuffers(
const PointsToSet::BufferList& points_to_set,
absl::Span<HloInstruction* const> instructions) {
PointsToSet::BufferList buffers;
for (auto instruction : instructions) {
buffers.push_back(GetBuffer(instruction, {}));
}
ExpectHasBuffers(points_to_set, buffers);
}
void ExpectHasTopLevelBuffers(
const PointsToSet::BufferSet& points_to_set,
absl::Span<HloInstruction* const> instructions) {
ExpectHasTopLevelBuffers(
PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()),
instructions);
}
void ExpectHasBufferAliases(
const HloInstruction* instruction, const ShapeIndex& index,
absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) {
const LogicalBuffer* buffer =
points_to_analysis_->GetBufferDefinedAt(instruction, index).value();
std::vector<BufferAlias> expected_aliases;
expected_aliases.reserve(expected.size());
for (auto& pair : expected) {
expected_aliases.push_back(BufferAlias(pair.first, pair.second));
}
EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer),
UnorderedElementsAreArray(expected_aliases));
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
};
TEST_F(TuplePointsToAnalysisTest, SimpleTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant1).size());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});
EXPECT_TRUE(
points_to_analysis_->GetPointsToSet(constant1).tuple_sources({}).empty());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());
EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant2).size());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});
EXPECT_TRUE(
points_to_analysis_->GetPointsToSet(constant2).tuple_sources({}).empty());
EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
UnorderedElementsAre(tuple));
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
{constant1, constant2, tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});
const PointsToSet& tuple_points_to_set =
points_to_analysis_->GetPointsToSet(tuple);
EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(
*GetBuffer(constant1, {}), {0}));
EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(
*GetBuffer(constant2, {}), {1}));
EXPECT_FALSE(tuple_points_to_set.ContainsBufferAtIndex(
*GetBuffer(constant2, {}), {0}));
EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant1, {})));
EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant2, {})));
}
TEST_F(TuplePointsToAnalysisTest, NestedTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({inner_tuple, constant3}));
BuildModuleAndRunAnalysis(builder.Build());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(constant3).element({}), {constant3});
EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(inner_tuple).size());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(inner_tuple).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(inner_tuple).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(inner_tuple).CreateFlattenedSet(),
{constant1, constant2, inner_tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(inner_tuple).element({}),
{inner_tuple});
EXPECT_THAT(
points_to_analysis_->GetPointsToSet(inner_tuple).tuple_sources({}),
UnorderedElementsAre(inner_tuple));
EXPECT_EQ(5, points_to_analysis_->GetPointsToSet(tuple).size());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
{constant1, constant2, constant3, inner_tuple, tuple});
EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
UnorderedElementsAre(tuple));
EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({0}),
UnorderedElementsAre(inner_tuple));
EXPECT_TRUE(
points_to_analysis_->GetPointsToSet(tuple).tuple_sources({1}).empty());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({0}), {inner_tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({0, 0}), {constant1});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({0, 1}), {constant2});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant3});
}
TEST_F(TuplePointsToAnalysisTest, GetTupleElement) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({inner_tuple, constant3}));
auto get_tuple_element = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(inner_tuple->shape(), tuple, 0));
BuildModuleAndRunAnalysis(builder.Build());
auto& points_to_set = points_to_analysis_->GetPointsToSet(get_tuple_element);
EXPECT_EQ(3, points_to_set.size());
EXPECT_FALSE(points_to_set.IsAmbiguous());
EXPECT_TRUE(points_to_set.IsDistinct());
ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(),
{constant1, constant2, inner_tuple});
ExpectHasTopLevelBuffers(points_to_set.element({}), {inner_tuple});
EXPECT_THAT(points_to_set.tuple_sources({}),
UnorderedElementsAre(inner_tuple));
}
TEST_F(TuplePointsToAnalysisTest, AddDependency) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto add_dependency = builder.AddInstruction(
HloInstruction::CreateAddDependency(constant, token));
BuildModuleAndRunAnalysis(builder.Build());
auto& points_to_set = points_to_analysis_->GetPointsToSet(add_dependency);
EXPECT_EQ(1, points_to_set.size());
EXPECT_FALSE(points_to_set.IsAmbiguous());
EXPECT_TRUE(points_to_set.IsDistinct());
ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(), {constant});
}
TEST_F(TuplePointsToAnalysisTest, DuplicatedElement) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant, constant, constant}));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_EQ(2, points_to_analysis_->GetPointsToSet(tuple).size());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
{constant, tuple});
}
TEST_F(TuplePointsToAnalysisTest, TupleCopy) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
{constant1, constant2, tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(copy).element({}), {copy});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(copy).CreateFlattenedSet(),
{constant1, constant2, copy});
}
TEST_F(TuplePointsToAnalysisTest, CopyStartAndCopyDone) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto copy_start = builder.AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({constant->shape(), constant->shape(),
ShapeUtil::MakeShape(U32, {})}),
constant));
auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopyDone, copy_start));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_start).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_start).IsDistinct());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_done).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_done).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(copy_start).element({}),
{copy_start});
ExpectHasBufferAliases(copy_start, {0}, {{copy_start, {0}}, {copy_done, {}}});
ExpectHasBufferAliases(constant, {}, {{constant, {}}, {copy_start, {1}}});
}
TEST_F(TuplePointsToAnalysisTest, AsyncOps) {
std::string hlo_str = R"(
HloModule module
ENTRY entry {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
ROOT async-done = f32[2,3] custom-call-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));
HloInstruction* param =
module_->entry_computation()->parameter_instruction(0);
HloInstruction* async_start = FindInstruction(module_.get(), "async-start");
HloInstruction* async_update = FindInstruction(module_.get(), "async-update");
HloInstruction* async_done = FindInstruction(module_.get(), "async-done");
RunAnalysis();
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_start).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_start).IsDistinct());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_update).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_update).IsDistinct());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_done).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_done).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(async_start).element({}),
{async_start});
ExpectHasBufferAliases(
param, {}, {{param, {}}, {async_start, {0, 0}}, {async_update, {0, 0}}});
ExpectHasBufferAliases(
async_start, {1},
{{async_start, {1}}, {async_update, {1}}, {async_done, {}}});
ExpectHasBufferAliases(async_start, {2},
{{async_start, {2}}, {async_update, {2}}});
}
TEST_F(TuplePointsToAnalysisTest, SendAndSendDone) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto send = builder.AddInstruction(
HloInstruction::CreateSend(constant, token, 0));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send).IsDistinct());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send_done).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send_done).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(send).element({}), {send});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(send).element({0}), {constant});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(send_done).CreateFlattenedSet(),
{send_done});
ExpectHasBufferAliases(constant, {}, {{constant, {}}, {send, {0}}});
}
TEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(HloInstruction::CreateRecv(
ShapeUtil::MakeShape(F32, {1, 2, 3}), token, 0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv).IsDistinct());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv_done).IsAmbiguous());
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv_done).IsDistinct());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(recv).element({}), {recv});
ExpectHasBufferAliases(recv, {0}, {{recv, {0}}, {recv_done, {0}}});
}
TEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(constant2->shape(), constant2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant1, bitcast}));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(bitcast).size());
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(bitcast).element({}), {constant2});
EXPECT_TRUE(
points_to_analysis_->GetPointsToSet(bitcast).tuple_sources({}).empty());
EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());
EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
UnorderedElementsAre(tuple));
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
{constant1, constant2, tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});
ExpectHasTopLevelBuffers(
points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});
}
TEST_F(TuplePointsToAnalysisTest, PointsToTupleConstantElements) {
auto builder = HloComputation::Builder(TestName());
Literal elements[] = {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}),
LiteralUtil::CreateR1<float>({2.0, 42})};
auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::MakeTuple({&elements[0], &elements[1]})));
auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
tuple_constant->shape(), HloOpcode::kCopy, tuple_constant));
BuildModuleAndRunAnalysis(builder.Build());
auto& points_to_set = points_to_analysis_->GetPointsToSet(copy);
ExpectHasBuffers(points_to_set.element({}), {GetBuffer(copy, {})});
ExpectHasBuffers(points_to_set.element({0}),
{GetBuffer(tuple_constant, {0})});
ExpectHasBuffers(points_to_set.element({1}),
{GetBuffer(tuple_constant, {1})});
}
TEST_F(TuplePointsToAnalysisTest, BufferAliases) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto inner_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({inner_tuple, constant2}));
BuildModuleAndRunAnalysis(builder.Build());
ExpectHasBufferAliases(
constant1, {},
{{constant1, {}}, {inner_tuple, {0}}, {tuple, {0, 0}}});
ExpectHasBufferAliases(
constant2, {},
{{constant2, {}}, {inner_tuple, {1}}, {tuple, {0, 1}}, {tuple, {1}}});
ExpectHasBufferAliases(inner_tuple, {},
{{inner_tuple, {}}, {tuple, {0}}});
ExpectHasBufferAliases(tuple, {}, {{tuple, {}}});
}
TEST_F(TuplePointsToAnalysisTest, DISABLED_CustomCall) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
Shape data_shape = ShapeUtil::MakeShape(F32, {});
auto ccall = builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTupleShape({data_shape, data_shape}), {constant},
"TestOp"));
Cast<HloCustomCallInstruction>(ccall)->set_output_to_operand_aliasing(
{std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>{
ShapeIndex{1}, std::pair<int64_t, ShapeIndex>(0, {})}});
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, ccall, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, ccall, 1));
BuildModuleAndRunAnalysis(builder.Build());
ExpectHasBufferAliases(ccall, {0}, {{gte0, {}}, {ccall, {0}}});
ExpectHasBufferAliases(constant, {},
{{constant, {}}, {gte1, {}}, {ccall, {1}}});
}
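// Exercises points-to propagation through fusion parameters: each
// get-tuple-element of a fused tuple parameter should point at the matching
// sub-buffer of the fusion operand.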
class FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest {
protected:
void Run(const std::string& hlo_str, int64_t expected_num_users) {
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_str));
auto* fusion = module_->entry_computation()->root_instruction();
auto* tuple_param0 = fusion->operand(0);
RunAnalysis();
auto* fusion_param = GetFusionParameterForOperand(fusion, tuple_param0);
ExpectHasBuffers(
points_to_analysis_->GetPointsToSet(fusion_param).element({}),
{GetBuffer(fusion_param, {})});
ExpectHasBuffers(
points_to_analysis_->GetPointsToSet(fusion_param).element({0}),
{GetBuffer(fusion_param, {0})});
ExpectHasBuffers(
points_to_analysis_->GetPointsToSet(fusion_param).element({1}),
{GetBuffer(fusion_param, {1})});
auto fused_gte0 = GetUniqueFusionParameterUserAt(fusion_param, 0);
ExpectHasBuffers(
points_to_analysis_->GetPointsToSet(fused_gte0).element({}),
{GetBuffer(fusion_param, {0})});
auto fused_gte1 = GetUniqueFusionParameterUserAt(fusion_param, 1);
ExpectHasBuffers(
points_to_analysis_->GetPointsToSet(fused_gte1).element({}),
{GetBuffer(fusion_param, {1})});
ExpectHasBufferAliases(fusion_param, {0},
{{fusion_param, {0}}, {fused_gte0, {}}});
ExpectHasBufferAliases(fusion_param, {1},
{{fusion_param, {1}}, {fused_gte1, {}}});
ExpectNumUsersOfAliases(fusion_param, {0}, expected_num_users);
}
HloInstruction* GetFusionParameterForOperand(HloInstruction* fusion,
const HloInstruction* operand) {
const auto& fused_instructions = fusion->fused_instructions();
auto it =
absl::c_find_if(fused_instructions, [&](const HloInstruction* fused) {
return fused->opcode() == HloOpcode::kParameter &&
fusion->operand(fused->parameter_number()) == operand;
});
CHECK(it != fusion->fused_instructions().end());
return *it;
}
std::vector<HloInstruction*> GetFusionParameterUsersAt(
HloInstruction* fusion_param, int64_t tuple_index) {
CHECK(fusion_param->shape().IsTuple());
std::vector<HloInstruction*> users_at_tuple_index;
for (auto user : fusion_param->users()) {
CHECK_EQ(HloOpcode::kGetTupleElement, user->opcode());
if (user->tuple_index() == tuple_index) {
users_at_tuple_index.push_back(user);
}
}
return users_at_tuple_index;
}
HloInstruction* GetUniqueFusionParameterUserAt(HloInstruction* fusion_param,
int64_t tuple_index) {
std::vector<HloInstruction*> users =
GetFusionParameterUsersAt(fusion_param, tuple_index);
CHECK_EQ(1, users.size());
return users[0];
}
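  // Counts users across all aliases of the buffer at `index`, skipping
  // get-tuple-element users of non-top-level buffers (those consume a
  // sub-buffer rather than the buffer itself), and checks the expected count.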
void ExpectNumUsersOfAliases(const HloInstruction* instruction,
const ShapeIndex& index,
const int64_t expected_num_users) {
const auto* buffer = GetBuffer(instruction, index);
int64_t num_users = 0;
for (const auto& alias : points_to_analysis_->GetBufferAliases(*buffer)) {
for (auto user : alias.instruction()->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {
continue;
}
++num_users;
}
}
EXPECT_EQ(expected_num_users, num_users);
}
};
TEST_F(FusionPointsToAnalysisTest, FusionParam0OneUser) {
std::string hlo_str = R"(
HloModule FusionParam0OneUser
%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {
%param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)
%get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0
%get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1
%constant.3 = f32[3]{0} constant({1, 1, 1})
%add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)
%constant.2 = s32[] constant(0)
ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.1, s32[] %constant.2)
}
ENTRY %FusionParam0OneUser (param0: (f32[8], f32[3])) -> f32[8] {
%param0 = (f32[8]{0}, f32[3]{0}) parameter(0)
ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation
}
)";
Run(hlo_str, 1);
}
TEST_F(FusionPointsToAnalysisTest, FusionParam0TwoUsers) {
std::string hlo_str = R"(
HloModule FusionParam0TwoUsers
%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {
%param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)
%get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0
%get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1
%constant.3 = f32[3]{0} constant({1, 1, 1})
%add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)
%slice = f32[3]{0} slice(f32[8]{0} %get-tuple-element.1), slice={[0:3]}
%add.2 = f32[3]{0} add(f32[3]{0} %add.1, f32[3]{0} %slice)
%constant.2 = s32[] constant(0)
ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.2, s32[] %constant.2)
}
ENTRY %FusionParam0TwoUsers (param0: (f32[8], f32[3])) -> f32[8] {
%param0 = (f32[8]{0}, f32[3]{0}) parameter(0)
ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation
}
)";
Run(hlo_str, 2);
}
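// Base fixture for the tests below; unlike TuplePointsToAnalysisTest it keeps
// a pointer to the entry computation so tests can fuse instructions before
// running the analysis.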
class PointsToAnalysisTestBase : public HloTestBase {
protected:
void BuildModule(std::unique_ptr<HloComputation> computation) {
module_ = CreateNewVerifiedModule();
computation_ = module_->AddEntryComputation(std::move(computation));
}
void RunAnalysis() {
CHECK_NOTNULL(module_.get());
points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();
}
void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) {
BuildModule(std::move(computation));
RunAnalysis();
}
std::unique_ptr<HloModule> module_;
HloComputation* computation_ = nullptr;
std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
};
class DoesNotUseOperandBufferTest : public PointsToAnalysisTestBase {};
TEST_F(DoesNotUseOperandBufferTest, GetTupleElement) {
auto builder = HloComputation::Builder(TestName());
Shape elem_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1));
builder.AddInstruction(
HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1));
BuildModuleAndRunAnalysis(builder.Build());
EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, gte0));
EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, gte1));
EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte0));
EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte1));
}
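// The fusion below only reads tuple element {1} (through the fused gte1), so
// the analysis should report that it does not use the buffer of element {0}.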
TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
auto starts = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update, {starts}));
builder.AddInstruction(
HloInstruction::CreateTuple({gte0, dynamic_update_slice}));
BuildModule(builder.Build());
auto fusion = computation_->CreateFusionInstruction(
{dynamic_update_slice, starts, update, gte1},
HloInstruction::FusionKind::kLoop);
RunAnalysis();
EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion));
EXPECT_FALSE(
points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7d36866-411c-46ed-801c-d8994c2f600b | cpp | tensorflow/tensorflow | scatter_expander | third_party/xla/xla/service/gpu/transforms/scatter_expander.cc | third_party/xla/xla/service/scatter_expander_test.cc | #include "xla/service/gpu/transforms/scatter_expander.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
namespace xla {
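// Expand scatters that produce a tuple (variadic scatter) or whose result
// element type is wider than 64 bits; these are the cases the GPU scatter
// emitter presumably cannot lower directly.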
bool GpuScatterExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kScatter &&
(inst->shape().IsTuple() ||
primitive_util::BitWidth(inst->shape().element_type()) > 64);
}
} | #include "xla/service/scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class ScatterExpanderTest : public HloTestBase {
protected:
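  // Drops the layout of the named instruction so the expander is exercised on
  // operands without a layout.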
void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
HloInstruction* inst = FindInstruction(module, inst_name);
inst->mutable_shape()->clear_layout();
}
};
TEST_F(ScatterExpanderTest, ScatterOperandWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, ScatterMultipleOperandsWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] constant({2,4,6,8,10})
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(1)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest,
       EliminateSimpleMultioutputScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[3,3] parameter(0)
operand1 = bf16[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = s32[2,3] parameter(3)
update1 = bf16[2,3] parameter(4)
ROOT scatter = (s32[3,3], bf16[3,3]) scatter(operand0, operand1, indices, update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest,
EliminateSimpleMultioutputScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(0)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
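// With kEliminateIndeterministicScatters, only scatters whose combiner can
// yield nondeterministic results are expanded: floating-point add is
// non-associative under rounding and gets rewritten, while integer add and
// f32 maximum are associative and are left as scatters.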
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = s32[] parameter(1)
arg0.172 = s32[] parameter(0)
ROOT add.48 = s32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = s32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = s32[4096,1,1] parameter(2)
ROOT scatter.48 = s32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateScatterWithNonAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT add.48 = f32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeFp32Combiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT max.48 = f32[] maximum(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
931a8bc3-ba94-4243-bc39-9a972cf91eb2 | cpp | tensorflow/tensorflow | dynamic_dimension_inference | third_party/xla/xla/service/dynamic_dimension_inference.cc | third_party/xla/xla/service/dynamic_dimension_inference_test.cc | #include "xla/service/dynamic_dimension_inference.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
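// Wraps `narrow_comp` in a computation taking `wide_shape`: the wide tuple
// parameter is narrowed back to the original parameter shape by extracting a
// prefix, and the original body is inlined into the wrapper.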
absl::StatusOr<std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenComputation(HloComputation* narrow_comp, const Shape& wide_shape) {
TF_RET_CHECK(wide_shape.IsTuple());
const Shape& narrow_shape = narrow_comp->parameter_instruction(0)->shape();
if (Shape::Equal()(wide_shape, narrow_shape)) {
return std::make_pair(narrow_comp, CallInliner::InlinedInstructionMap());
}
HloComputation* wide_comp = [&]() {
HloComputation::Builder builder(absl::StrCat("wide.", narrow_comp->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.", narrow_comp->parameter_instruction(0)->name())));
return narrow_comp->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* wide_parameter = wide_comp->parameter_instruction(0);
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_parameter, narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
narrow_comp->parameter_instruction(0)->name()));
HloInstruction* call_narrow_comp = wide_comp->AddInstruction(
HloInstruction::CreateCall(narrow_comp->root_instruction()->shape(),
{truncated_parameter}, narrow_comp));
  wide_comp->set_root_instruction(call_narrow_comp,
                                  /*accept_different_shape=*/true);
TF_ASSIGN_OR_RETURN(auto inline_map, CallInliner::Inline(call_narrow_comp));
return std::make_pair(wide_comp, std::move(inline_map));
}
}
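// Rewriting visitor that walks one computation and, for each instruction with
// a dynamic output dimension, records in the parent DynamicDimensionInference
// which HLO value carries that dimension's runtime size.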
class DynamicDimensionInferenceVisitor : public DfsHloRewriteVisitor {
public:
explicit DynamicDimensionInferenceVisitor(
const DynamicParameterBinding& param_bindings,
HloDataflowAnalysis& dataflow_analysis, DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler,
DynamicDimensionInference::ShapeCheckMode shape_check_mode,
DynamicDimensionInference::AssertionGenerator assertion_generator)
: param_bindings_(param_bindings),
dataflow_analysis_(dataflow_analysis),
parent_(parent),
custom_call_handler_(std::move(custom_call_handler)),
shape_check_mode_(shape_check_mode),
assertion_generator_(assertion_generator) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
static absl::StatusOr<bool> Run(
HloComputation* computation, HloDataflowAnalysis& dataflow_analysis,
const DynamicParameterBinding& param_bindings,
DynamicDimensionInference* parent,
DynamicDimensionInference::CustomCallInferenceHandler
custom_call_handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
parent->execution_threads_)) {
return false;
}
DynamicDimensionInferenceVisitor visitor(
param_bindings, dataflow_analysis, parent,
std::move(custom_call_handler), shape_check_mode, assertion_generator);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
if (visitor.shape_assertion_ != nullptr) {
CHECK(assertion_generator);
assertion_generator(visitor.shape_assertion_);
}
return visitor.changed();
}
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleConstant(HloInstruction* hlo) override;
absl::Status HandleReduce(HloInstruction* hlo) override;
absl::Status HandleDot(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleTranspose(HloInstruction* hlo) override;
absl::Status HandleDynamicReshape(HloInstruction* hlo) override;
absl::Status HandleReshape(HloInstruction* hlo) override;
absl::Status HandleSort(HloInstruction* hlo) override;
absl::Status HandlePad(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleBroadcast(HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSelect(HloInstruction* hlo) override;
absl::Status HandleConvolution(HloInstruction* hlo) override;
absl::Status HandleConcatenate(HloInstruction* hlo) override;
absl::Status HandleReduceWindow(HloInstruction* hlo) override;
absl::Status HandleReverse(HloInstruction* hlo) override;
absl::Status HandleSelectAndScatter(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override;
absl::Status HandleElementwiseNary(HloInstruction* hlo);
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override;
absl::Status HandleClamp(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicSlice(HloInstruction* hlo) override;
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override;
absl::Status HandleGather(HloInstruction* hlo) override;
absl::Status HandleScatter(HloInstruction* hlo) override;
absl::Status HandleMap(HloInstruction* hlo) override;
absl::Status HandleDomain(HloInstruction* hlo) override;
absl::Status HandleAsyncStart(HloInstruction* hlo) override;
absl::Status HandleAsyncDone(HloInstruction* hlo) override;
private:
using OperandDynamicDimensionFn = absl::FunctionRef<absl::Status(
HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size)>;
using DynamicDimensionFn = std::function<absl::Status(
ShapeIndex index, int64_t dimension, HloInstruction* dynamic_size)>;
void SetDynamicSize(HloInstruction* inst, const ShapeIndex& index,
int64_t dim, HloInstruction* size,
bool clear_dynamic_dimension = true);
void SetDynamicSizes(HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes);
absl::Status HandleDynamicConvolutionForward(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension,
HloInstruction* dynamic_size);
absl::Status HandleDynamicConvolutionKernelGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicConvolutionInputGrad(HloInstruction* hlo,
int64_t operand_index,
int64_t dimension);
absl::Status HandleDynamicWindowSamePadding(HloInstruction* hlo,
HloInstruction* dynamic_size,
int64_t operand_index,
int64_t dimension);
absl::Status ForEachOperandDynamicDimension(HloInstruction* inst,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimensionInOperand(HloInstruction* inst,
int64_t operand_index,
OperandDynamicDimensionFn);
absl::Status ForEachDynamicDimension(HloInstruction* inst,
const DynamicDimensionFn& fn);
bool CanInfer(HloInstruction* hlo) { return parent_->CanInfer(hlo); }
absl::StatusOr<bool> RequiresPadToStatic(HloInstruction* instr,
ShapeIndex shape_index);
absl::Status InsertPadToStaticOnInstruction(HloInstruction* inst);
absl::Status InsertShapeCheck(HloInstruction* dim1, HloInstruction* dim2,
bool support_implicit_broadcast);
absl::Status PassThroughDynamicDimension(HloInstruction*);
const DynamicParameterBinding& param_bindings_;
HloDataflowAnalysis& dataflow_analysis_;
DynamicDimensionInference* parent_;
DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_;
DynamicDimensionInference::ShapeCheckMode shape_check_mode_;
HloInstruction* shape_assertion_ = nullptr;
DynamicDimensionInference::AssertionGenerator assertion_generator_;
};
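// Records `size` as the runtime extent of dimension `dim` of `inst` at
// `index`. By default also clears the dynamic bit in the shape so that
// downstream passes see a static, bound-sized shape.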
void DynamicDimensionInferenceVisitor::SetDynamicSize(
HloInstruction* inst, const ShapeIndex& index, int64_t dim,
HloInstruction* size, bool clear_dynamic_dimension) {
parent_->SetDynamicSize(inst, index, dim, size);
if (clear_dynamic_dimension) {
ShapeUtil::GetMutableSubshape(inst->mutable_shape(), index)
->set_dynamic_dimension(dim, false);
}
MarkAsChanged();
}
void DynamicDimensionInferenceVisitor::SetDynamicSizes(
HloInstruction* inst, const ShapeIndex& index,
absl::Span<HloInstruction* const> sizes) {
const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
CHECK(subshape.IsArray() && subshape.rank() == sizes.size());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (sizes[dimension] != nullptr) {
SetDynamicSize(inst, index, dimension, sizes[dimension]);
}
}
}
absl::Status DynamicDimensionInferenceVisitor::DefaultAction(
HloInstruction* hlo) {
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
return UnimplementedStrCat(
"Asked to propagate a dynamic dimension from hlo ", operand->name(),
"@", index.ToString(), "@", dimension, " to hlo ", hlo->ToString(),
", which is not implemented.");
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (hlo->tuple_index() != index[0]) {
return absl::OkStatus();
}
ShapeIndex new_index(ShapeIndexView(index).subspan(1));
SetDynamicSize(hlo, new_index, dimension, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleTuple(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction*, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
index.push_front(operand_index);
SetDynamicSize(hlo, index, dimension, dynamic_size);
return absl::OkStatus();
}));
return absl::OkStatus();
}
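// Broadcast: operand dimension d maps to output dimension dimensions(d).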
absl::Status DynamicDimensionInferenceVisitor::HandleBroadcast(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
int64_t broadcast_dim = hlo->dimensions(dimension);
SetDynamicSize(hlo, {}, broadcast_dim, dynamic_size);
return absl::OkStatus();
});
}
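// A constant with dynamic dimensions is replaced by a fully static constant
// padded to the dimension bounds, plus R0 size constants recording each
// dynamic extent.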
absl::Status DynamicDimensionInferenceVisitor::HandleConstant(
HloInstruction* hlo) {
if (!hlo->shape().is_dynamic()) {
return absl::OkStatus();
}
auto* constant = Cast<HloConstantInstruction>(hlo);
ShapeTree<bool> do_pad(constant->shape(), false);
Shape padded_shape = constant->shape();
bool pad_any = false;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableSubshapeWithStatus(
&padded_shape,
[&](Shape* subshape, const ShapeIndex& index) -> absl::Status {
if (!subshape->IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool requires_pad, RequiresPadToStatic(hlo, index));
if (requires_pad) {
pad_any = *do_pad.mutable_element(index) = true;
*subshape = ShapeUtil::MakeStaticShape(*subshape);
}
return absl::OkStatus();
}));
if (!pad_any) {
return absl::OkStatus();
}
Literal padded_literal(padded_shape);
do_pad.ForEachElement([&](const ShapeIndex& index, bool requires_pad) {
const Shape& subshape = ShapeUtil::GetSubshape(padded_shape, index);
if (!subshape.IsArray()) {
return absl::OkStatus();
}
    TF_RETURN_IF_ERROR(padded_literal.CopyFrom(constant->literal(),
                                               /*dest_shape_index=*/index,
                                               /*src_shape_index=*/index,
                                               /*only_dynamic_bound=*/true));
if (!requires_pad) {
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (subshape.is_dynamic_dimension(dimension)) {
padded_literal.SetDynamicSize(
dimension, index,
constant->literal().GetDynamicSize(dimension, index));
}
}
}
return absl::OkStatus();
});
auto* padded_constant = hlo->AddInstruction(
HloInstruction::CreateConstant(std::move(padded_literal)));
TF_RETURN_IF_ERROR(constant->ReplaceAllUsesWith(padded_constant));
SetVisited(*padded_constant);
TF_RETURN_IF_ERROR(do_pad.ForEachElementWithStatus(
[&](const ShapeIndex& index, bool requires_pad) -> absl::Status {
if (!requires_pad) {
return absl::OkStatus();
}
const Shape& subshape =
ShapeUtil::GetSubshape(constant->shape(), index);
TF_RET_CHECK(subshape.IsArray());
for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {
if (!subshape.is_dynamic_dimension(dimension)) {
continue;
}
HloInstruction* dynamic_size = hlo->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
constant->literal().GetDynamicSize(dimension, index))));
SetVisited(*dynamic_size);
SetDynamicSize(padded_constant, index, dimension, dynamic_size);
}
return absl::OkStatus();
}));
MarkAsChanged();
return absl::OkStatus();
}
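// Custom calls: "PadToStatic" exposes its dynamic sizes as extra tuple
// outputs; a few known targets (SliceToDynamic, Sharding, Resize*, and the
// dynamic window/convolution helpers) get bespoke propagation, and any other
// target with a dynamic operand is rejected as unimplemented.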
absl::Status DynamicDimensionInferenceVisitor::HandleCustomCall(
HloInstruction* hlo) {
if (hlo->custom_call_target() == "PadToStatic") {
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (hlo->operand(0)->shape().is_dynamic_dimension(i)) {
HloInstruction* dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeScalarShape(S32), hlo, i + 1));
ShapeIndex data_output = {0};
SetDynamicSize(hlo, data_output, i, dynamic_size);
}
}
return absl::OkStatus();
}
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
if (custom_call_handler_) {
TF_RETURN_IF_ERROR(custom_call_handler_(hlo, parent_));
} else {
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
if (hlo->custom_call_target() == "SliceToDynamic" ||
hlo->custom_call_target() == "Sharding" ||
(absl::StartsWith(hlo->custom_call_target(), "Resize") &&
(dimension == 0 || dimension == 3))) {
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicReduceWindowSamePadding") {
if (hlo->operand_count() > 2) {
return Unimplemented(
"DynamicReduceWindowSamePadding doesn't support variadic "
"reduce window %s",
hlo->ToString());
}
return HandleDynamicWindowSamePadding(hlo, dynamic_size,
operand_index, dimension);
}
if (hlo->custom_call_target() ==
"DynamicSelectAndScatterSamePadding") {
if (operand_index == 1) {
return absl::OkStatus();
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
}
if (hlo->custom_call_target() == "DynamicConvolutionInputGrad") {
return HandleDynamicConvolutionInputGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionKernelGrad") {
return HandleDynamicConvolutionKernelGrad(hlo, operand_index,
dimension);
}
if (hlo->custom_call_target() == "DynamicConvolutionForward") {
return HandleDynamicConvolutionForward(hlo, operand_index,
dimension, dynamic_size);
}
return Unimplemented(
"CustomCall \"%s\" is not supported to have a dynamic dimension",
hlo->custom_call_target());
}));
}
return InsertPadToStaticOnInstruction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleSort(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dynamic_dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
if (sort->values_count() == 0) {
SetDynamicSize(hlo, {}, dynamic_dimension, dynamic_size);
} else {
SetDynamicSize(hlo, {operand_index}, dynamic_dimension, dynamic_size);
}
return absl::OkStatus();
});
}
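// Pad: the dynamic extent grows by the edge padding and, when interior
// padding is present, by max(size - 1, 0) * interior_padding, mirroring the
// static shape rule for pad.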
absl::Status DynamicDimensionInferenceVisitor::HandlePad(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (operand_index != 0) {
return Unimplemented(
"Dynamic dimension on padding value is not supported");
}
const PaddingConfig_PaddingConfigDimension& padding_config =
hlo->padding_config().dimensions(dimension);
HloInstruction* dynamic_size_adjusted = dynamic_size;
if (padding_config.interior_padding() != 0) {
HloInstruction* one =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* zero =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(0)));
HloInstruction* interior_padding = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_config.interior_padding())));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kSubtract,
dynamic_size_adjusted, one));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kMaximum,
dynamic_size_adjusted, zero));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kMultiply,
dynamic_size_adjusted, interior_padding));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kAdd,
dynamic_size_adjusted, dynamic_size));
}
HloInstruction* adjustment = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_config.edge_padding_low() +
padding_config.edge_padding_high())));
dynamic_size_adjusted =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
dynamic_size_adjusted->shape(), HloOpcode::kAdd,
dynamic_size_adjusted, adjustment));
SetDynamicSize(hlo, {}, dimension, dynamic_size_adjusted);
return absl::OkStatus();
});
}
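// Reduce: a dynamic dimension survives only if it is not reduced; its output
// position is the number of non-reduced dimensions that precede it. The same
// sizes are applied to every array leaf of a variadic reduce's tuple result.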
absl::Status DynamicDimensionInferenceVisitor::HandleReduce(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
auto* reduce = Cast<HloReduceInstruction>(hlo);
int64_t rank = -1;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
reduce->shape(),
[&](const Shape& subshape, const ShapeIndex& index) -> absl::Status {
if (!subshape.IsArray()) {
return absl::OkStatus();
}
if (rank < 0) {
rank = subshape.rank();
} else {
TF_RET_CHECK(rank == subshape.rank());
}
return absl::OkStatus();
}));
TF_RET_CHECK(rank >= 0);
absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(rank, nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
int64_t operand_count = reduce->operand_count();
CHECK_EQ(operand_count % 2, 0);
if (operand_index >= reduce->input_count()) {
return absl::OkStatus();
}
if (absl::c_count(reduce->dimensions(), dimension) != 0) {
return absl::OkStatus();
}
int64_t dimensions_not_reduced_count = 0;
for (int64_t i = 0; i < operand->shape().rank(); ++i) {
if (dimension == i) {
dynamic_sizes[dimensions_not_reduced_count] = dynamic_size;
return absl::OkStatus();
}
if (!absl::c_linear_search(reduce->dimensions(), i)) {
dimensions_not_reduced_count++;
}
}
return absl::OkStatus();
}));
ShapeUtil::ForEachSubshape(
reduce->shape(), [&](const Shape& subshape, ShapeIndex shape_index) {
if (!subshape.IsArray()) {
return;
}
SetDynamicSizes(reduce, shape_index, dynamic_sizes);
});
return absl::OkStatus();
}
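// Dot: maps a dynamic operand dimension to its position in the result, which
// lists batch dimensions first, then non-contracting LHS dimensions, then
// non-contracting RHS dimensions.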
absl::Status DynamicDimensionInferenceVisitor::HandleDot(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(hlo->shape().rank(),
nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex operand_shape_index,
int64_t operand_dimension, int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
HloInstruction* dot = hlo;
const DotDimensionNumbers& dimension_numbers =
dot->dot_dimension_numbers();
absl::flat_hash_map<int64_t, int64_t> result_dim_mapping;
int64_t current_result_dims = 0;
bool lhs = operand_index == 0;
if (lhs) {
for (int64_t i : dimension_numbers.lhs_batch_dimensions()) {
result_dim_mapping[i] = current_result_dims++;
}
} else {
for (int64_t i : dimension_numbers.rhs_batch_dimensions()) {
result_dim_mapping[i] = current_result_dims++;
}
}
for (int64_t i = 0; i < dot->operand(0)->shape().rank(); i++) {
if (absl::c_linear_search(
dimension_numbers.lhs_contracting_dimensions(), i)) {
continue;
}
if (absl::c_linear_search(dimension_numbers.lhs_batch_dimensions(),
i)) {
continue;
}
if (lhs) {
result_dim_mapping[i] = current_result_dims;
}
current_result_dims++;
}
for (int64_t i = 0; i < dot->operand(1)->shape().rank(); i++) {
if (absl::c_linear_search(
dimension_numbers.rhs_contracting_dimensions(), i)) {
continue;
}
if (absl::c_linear_search(dimension_numbers.rhs_batch_dimensions(),
i)) {
continue;
}
if (!lhs) {
result_dim_mapping[i] = current_result_dims;
}
current_result_dims++;
}
auto iter = result_dim_mapping.find(operand_dimension);
if (iter != result_dim_mapping.end()) {
dynamic_sizes[iter->second] = dynamic_size;
}
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
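// A transpose only permutes dimensions: operand dimension `dimension`
// becomes output dimension i where hlo->dimensions()[i] == dimension, so
// the dynamic size is forwarded to that permuted position unchanged.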
absl::Status DynamicDimensionInferenceVisitor::HandleTranspose(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
int64_t permuted_dim = -1;
for (int64_t i = 0; i < hlo->dimensions().size(); ++i) {
if (hlo->dimensions()[i] == dimension) {
TF_RET_CHECK(permuted_dim == -1);
permuted_dim = i;
}
}
SetDynamicSize(hlo, {}, permuted_dim, dynamic_size);
return absl::OkStatus();
});
}
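// For a convolution with a static window, only the batch dimension of the
// activations may be dynamic; it maps directly to the output batch
// dimension. Dynamic feature dimensions are contracted away, and a dynamic
// spatial dimension is rejected as unimplemented.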
absl::Status DynamicDimensionInferenceVisitor::HandleConvolution(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
HloInstruction* conv = hlo;
const ConvolutionDimensionNumbers& dimension_numbers =
conv->convolution_dimension_numbers();
if (operand_index == 0) {
if (dimension == dimension_numbers.input_batch_dimension()) {
SetDynamicSize(conv, {}, dimension_numbers.output_batch_dimension(),
dynamic_size);
return absl::OkStatus();
}
if (dimension == dimension_numbers.input_feature_dimension()) {
return absl::OkStatus();
}
} else {
if (dimension == dimension_numbers.kernel_input_feature_dimension()) {
return absl::OkStatus();
}
}
return Unimplemented("Dynamic Spatial Convolution is not supported: %s",
conv->ToString());
});
}
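// The output size of the concatenate dimension is the sum of the static
// operand sizes plus all dynamic operand sizes, accumulated at runtime.
// For example (sizes only): concatenating f32[<=4] with dynamic size s0
// and f32[3] along dim 0 yields constant(3) + s0. Dynamic dimensions on
// non-concatenate axes are forwarded unchanged.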
absl::Status DynamicDimensionInferenceVisitor::HandleConcatenate(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
int64_t static_size = 0;
std::vector<HloInstruction*> dynamic_concat_dims;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
HloInstruction* concat_dim_size = nullptr;
for (int64_t dimension = 0; dimension < hlo->operand(i)->shape().rank();
++dimension) {
if (dimension == hlo->concatenate_dimension()) {
HloInstruction* dynamic_size =
parent_->GetDynamicSize(hlo->mutable_operand(i), {}, dimension);
concat_dim_size = dynamic_size;
}
}
if (concat_dim_size == nullptr) {
static_size +=
hlo->operand(i)->shape().dimensions(hlo->concatenate_dimension());
} else {
dynamic_concat_dims.push_back(concat_dim_size);
}
}
std::vector<HloInstruction*> dynamic_sizes(hlo->shape().rank(), nullptr);
if (!dynamic_concat_dims.empty()) {
HloInstruction* dim_size_total =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(static_size)));
for (HloInstruction* dynamic_dim : dynamic_concat_dims) {
dim_size_total = hlo->parent()->AddInstruction(
HloInstruction::CreateBinary(dim_size_total->shape(), HloOpcode::kAdd,
dim_size_total, dynamic_dim));
}
dynamic_sizes[hlo->concatenate_dimension()] = dim_size_total;
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
int64_t concatenate_dimension = hlo->concatenate_dimension();
if (concatenate_dimension == dimension) {
return absl::OkStatus();
}
dynamic_sizes[dimension] = dynamic_size;
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
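// Replaces a get-dimension-size with the tracked dynamic size when one
// exists, or with a constant when the queried dimension is static, and
// redirects all uses (including uses as a dynamic size) to the
// replacement.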
absl::Status DynamicDimensionInferenceVisitor::HandleGetDimensionSize(
HloInstruction* gds) {
int64_t dim = gds->dimension();
TF_RET_CHECK(dim < gds->operand(0)->shape().rank()) << gds->ToString();
HloInstruction* operand = gds->mutable_operand(0);
TF_RET_CHECK(dim < operand->shape().rank());
HloInstruction* replacement = parent_->GetDynamicSize(operand, {}, dim);
HloComputation* computation = gds->parent();
if (replacement == nullptr &&
!gds->operand(0)->shape().is_dynamic_dimension(dim)) {
TF_RET_CHECK(dim < gds->operand(0)->shape().rank());
int32_t size = gds->operand(0)->shape().dimensions(dim);
replacement = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size)),
gds->name());
}
if (replacement != nullptr) {
TF_RETURN_IF_ERROR(gds->ReplaceAllUsesWith(replacement));
parent_->ReplaceAllDynamicDimensionUsesWith(gds, replacement);
MarkAsChanged();
}
return absl::OkStatus();
}
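// A set-dimension-size whose size operand is a constant equal to the full
// static bound is a no-op and introduces no dynamic dimension; otherwise
// the size operand becomes the dynamic size of the target dimension.
// Dynamic dimensions on the other axes of the operand pass through.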
absl::Status DynamicDimensionInferenceVisitor::HandleSetDimensionSize(
HloInstruction* hlo) {
bool dimension_is_static = false;
const HloInstruction* size = hlo->operand(1);
if (size->opcode() == HloOpcode::kConstant) {
TF_RET_CHECK(size->shape().rank() == 0);
if (size->literal().Get<int32_t>({}) ==
hlo->shape().dimensions(hlo->dimension()) &&
!hlo->shape().is_dynamic_dimension(hlo->dimension())) {
dimension_is_static = true;
}
}
if (!dimension_is_static) {
    SetDynamicSize(hlo, {}, hlo->dimension(), hlo->mutable_operand(1),
                   /*clear_dynamic_dimension=*/false);
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(operand_index == 0);
if (dimension != hlo->dimension()) {
            SetDynamicSize(hlo, index, dimension, dynamic_size,
                           /*clear_dynamic_dimension=*/false);
}
return absl::OkStatus();
}));
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicConvolutionForward(
HloInstruction* hlo, int64_t operand_index, int64_t dimension,
HloInstruction* dynamic_size) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
TF_RET_CHECK(operand_index == 0);
const ConvolutionDimensionNumbers& dimension_numbers =
hlo->convolution_dimension_numbers();
if (dimension == dimension_numbers.input_batch_dimension()) {
SetDynamicSize(hlo, {}, dimension_numbers.output_batch_dimension(),
dynamic_size);
return absl::OkStatus();
}
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dimension_numbers.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dimension_numbers.input_spatial_dimensions(spatial_dim_index);
int64_t output_spatial_dim =
dimension_numbers.output_spatial_dimensions(spatial_dim_index);
if (dimension == input_spatial_dim) {
WindowDimension window_dim = hlo->window().dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), hlo->padding_type());
TF_RET_CHECK(window_dim.base_dilation() == 1);
SetDynamicSize(hlo, {}, output_spatial_dim,
dynamic_window_dims.output_size);
return absl::OkStatus();
}
}
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicWindowSamePadding(
HloInstruction* hlo, HloInstruction* dynamic_size, int64_t operand_index,
int64_t dimension) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
const Window& window = hlo->window();
const WindowDimension& window_dim = window.dimensions(dimension);
if (!window_util::IsTrivialWindowDimension(window_dim)) {
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
SetDynamicSize(hlo, {}, dimension, dynamic_window_dims.output_size);
} else {
SetDynamicSize(hlo, {}, dimension, dynamic_size);
}
return absl::OkStatus();
}
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionInputGrad(
HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloInstruction* input_sizes = hlo->mutable_operand(0);
HloComputation* comp = hlo->parent();
TF_RET_CHECK(input_sizes->shape().rank() == 1) << hlo->ToString();
TF_RET_CHECK(input_sizes->shape().element_type() == S32) << hlo->ToString();
TF_RET_CHECK(input_sizes->shape().dimensions(0) ==
hlo->shape().dimensions_size())
<< hlo->ToString();
HloInstruction* slice = comp->AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(S32, {1}), input_sizes,
{dimension}, {dimension + 1}, {1}));
HloInstruction* reshape = comp->AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
SetDynamicSize(hlo, {}, dimension, reshape);
return absl::OkStatus();
}
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionKernelGrad(
HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
return absl::OkStatus();
}
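// Default propagation for shape-preserving ops: every operand dynamic
// dimension is copied to the same (shape index, dimension) position of
// the result, leaf by leaf.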
absl::Status DynamicDimensionInferenceVisitor::PassThroughDynamicDimension(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
hlo->shape());
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
const Shape& subshape = ShapeUtil::GetSubshape(hlo->shape(), index);
auto* element = dynamic_sizes.mutable_element(index);
element->resize(subshape.rank(), nullptr);
(*element)[dimension] = dynamic_size;
return absl::OkStatus();
}));
dynamic_sizes.ForEachElement([&](const ShapeIndex& index, const auto& sizes) {
if (sizes.empty()) {
return;
}
SetDynamicSizes(hlo, index, sizes);
});
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleDomain(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncStart(
HloInstruction* hlo) {
if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
parent_->execution_threads_)) {
return absl::OkStatus();
}
return DefaultAction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncDone(
HloInstruction* hlo) {
if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
parent_->execution_threads_)) {
return InsertPadToStaticOnInstruction(hlo);
}
return DefaultAction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseUnary(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleSelect(
HloInstruction* hlo) {
return HandleElementwiseNary(hlo);
}
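// For n-ary elementwise ops the operands must agree on each dynamic
// dimension. When two operands disagree, a runtime shape check is emitted
// and the result size is picked with implicit-broadcast semantics: if the
// smaller of the two sizes is 1 (a broadcast), take the max, otherwise
// take the min.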
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseNary(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloComputation* comp = hlo->parent();
absl::InlinedVector<absl::InlinedVector<HloInstruction*, 2>, 2> operand_sizes(
hlo->shape().rank(),
absl::InlinedVector<HloInstruction*, 2>(hlo->operand_count(), nullptr));
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
operand_sizes[dimension][operand_index] = dynamic_size;
return absl::OkStatus();
}));
absl::InlinedVector<HloInstruction*, 2> existing_sizes(hlo->shape().rank(),
nullptr);
for (int operand_index = 0; operand_index < hlo->operand_count();
++operand_index) {
for (int64_t dimension = 0; dimension < hlo->shape().rank(); ++dimension) {
HloInstruction* dynamic_size = operand_sizes[dimension][operand_index];
if (dynamic_size == nullptr) {
continue;
}
HloInstruction* existing_size = existing_sizes[dimension];
if (existing_size == nullptr) {
existing_sizes[dimension] = dynamic_size;
} else if (existing_sizes[dimension] != dynamic_size) {
            TF_RETURN_IF_ERROR(
                InsertShapeCheck(existing_size, dynamic_size,
                                 /*support_implicit_broadcast=*/true));
auto one = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
auto operand_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), dynamic_size, existing_size,
ComparisonDirection::kLt));
auto is_one = comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), dynamic_size, one,
ComparisonDirection::kEq));
operand_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
operand_needs_broadcast));
auto existing_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), existing_size, dynamic_size,
ComparisonDirection::kLt));
is_one = comp->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), existing_size, one,
ComparisonDirection::kEq));
existing_needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
existing_needs_broadcast));
auto needs_broadcast =
comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kOr,
operand_needs_broadcast, existing_needs_broadcast));
auto max_size = comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kMaximum, dynamic_size,
existing_size));
auto min_size = comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kMinimum, dynamic_size,
existing_size));
auto select_size = comp->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSelect,
needs_broadcast, max_size, min_size));
existing_sizes[dimension] = select_size;
}
}
}
SetDynamicSizes(hlo, {}, existing_sizes);
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseBinary(
HloInstruction* hlo) {
return HandleElementwiseNary(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleClamp(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicReshape(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
HloDynamicReshapeInstruction* dynamic_reshape =
Cast<HloDynamicReshapeInstruction>(hlo);
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (hlo->shape().is_dynamic_dimension(i)) {
SetDynamicSize(hlo, {}, i, dynamic_reshape->dim_sizes(i));
}
}
return absl::OkStatus();
}
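// Reshape propagation groups input and output dimensions into
// common-factor groups (contiguous runs with equal element counts). A
// dynamic input dimension must map to a unique output dimension within
// its group; a many-to-many group forces a decomposition into a
// flatten/unflatten pair via the inferred dimension. When the mapped
// static sizes differ, the dynamic size is rescaled by the ratio of the
// output and input dimension sizes.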
absl::Status DynamicDimensionInferenceVisitor::HandleReshape(
HloInstruction* const hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
VLOG(2) << "Handle reshape: " << hlo->ToString() << "\n";
absl::InlinedVector<HloInstruction*, 2> dynamic_sizes(hlo->shape().rank(),
nullptr);
using ReshapeGroup = std::pair<int64_t, int64_t>;
using ReshapeGroupPair = std::pair<ReshapeGroup, ReshapeGroup>;
auto is_reverse_reshape_group_pair =
[&](const HloInstruction* op1, const ReshapeGroupPair& p1,
const HloInstruction* op2, const ReshapeGroupPair& p2) -> bool {
return ShapeUtil::EqualStructure(
ShapeUtil::GetSubshape(
op1->operand(0)->shape(),
ShapeIndex(p1.first.first, p1.first.second)),
ShapeUtil::GetSubshape(
op2->operand(0)->shape(),
ShapeIndex(p2.second.first, p2.second.second))) &&
ShapeUtil::EqualStructure(
ShapeUtil::GetSubshape(
op1->shape(), ShapeIndex(p1.second.first, p1.second.second)),
ShapeUtil::GetSubshape(
op2->operand(0)->shape(),
ShapeIndex(p2.first.first, p2.first.second)));
};
auto find_reshape_group_pair = [](HloInstruction* reshape,
int64_t input_dynamic_dimension) {
VLOG(2) << "Find reshape pair: " << reshape->ToString() << "\n";
auto common_factors =
CommonFactors(reshape->operand(0)->shape().dimensions(),
reshape->shape().dimensions());
ReshapeGroup input_dim = {-1, -1}, output_dim = {-1, -1};
bool found = false;
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
if (input_dynamic_dimension >= start.first &&
input_dynamic_dimension < end.first) {
input_dim.first = start.first;
input_dim.second = end.first;
output_dim.first = start.second;
output_dim.second = end.second;
VLOG(3) << "Found common_factor group pair: " << input_dim.first << ","
<< input_dim.second << "->" << output_dim.first << ","
<< output_dim.second << "\n";
found = true;
break;
}
}
CHECK(found);
return ReshapeGroupPair(input_dim, output_dim);
};
auto reshape_group_pair_needs_flatten =
[](const ReshapeGroupPair& reshape_pair) {
return reshape_pair.first.second - reshape_pair.first.first > 1 &&
reshape_pair.second.second - reshape_pair.second.first > 1;
};
std::function<bool(HloInstruction*, const ReshapeGroupPair&, int64_t)>
find_reverse_past_reshape = [&](HloInstruction* op,
const ReshapeGroupPair reshape_pair,
int64_t dynamic_dimension_size) {
VLOG(2) << "Find reverse past reshape from " << op->ToString()
<< " for " << dynamic_dimension_size << "\n";
absl::InlinedVector<int64_t, 4> found_dims;
for (int op_dim_index = 0; op_dim_index < op->shape().rank();
++op_dim_index) {
if (op->shape().dimensions(op_dim_index) == dynamic_dimension_size) {
found_dims.push_back(op_dim_index);
}
}
if (found_dims.empty()) {
return false;
}
VLOG(3) << "Found " << found_dims.size() << "\n";
if (op->opcode() == HloOpcode::kReshape) {
for (auto op_dim_index : found_dims) {
auto orig_reshape_pair = find_reshape_group_pair(op, op_dim_index);
if (is_reverse_reshape_group_pair(op, orig_reshape_pair, hlo,
reshape_pair)) {
TF_CHECK_OK(ForEachOperandDynamicDimension(
op,
[&](HloInstruction* operand, ShapeIndex index,
int64_t op_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
if (op_dynamic_dimension >= orig_reshape_pair.first.first &&
op_dynamic_dimension < orig_reshape_pair.first.second) {
auto dynamic_size =
parent_->GetDynamicSize(op, {}, op_dynamic_dimension);
CHECK_NE(dynamic_size, nullptr);
auto hlo_dimension_index = op_dynamic_dimension -
orig_reshape_pair.first.first +
reshape_pair.second.first;
dynamic_sizes[hlo_dimension_index] = dynamic_size;
}
return absl::OkStatus();
}));
return true;
}
}
}
for (auto operand : op->mutable_operands()) {
if (find_reverse_past_reshape(operand, reshape_pair,
dynamic_dimension_size)) {
return true;
}
VLOG(3) << "Checking " << operand->ToString() << "\n";
}
return false;
};
absl::flat_hash_map<int64_t, ReshapeGroupPair> reshape_group_pairs;
bool need_flatten_unflatten =
hlo->inferred_dimension() != -1 &&
hlo->shape().dimensions(hlo->inferred_dimension()) == 1;
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
auto reshape_pair =
find_reshape_group_pair(hlo, input_dynamic_dimension);
reshape_group_pairs[input_dynamic_dimension] = reshape_pair;
if (reshape_group_pair_needs_flatten(reshape_pair)) {
need_flatten_unflatten = true;
}
return absl::OkStatus();
}));
if (need_flatten_unflatten) {
if (hlo->inferred_dimension() != -1) {
HloInstruction* operand = hlo->mutable_operand(0);
HloComputation* comp = hlo->parent();
HloInstruction* dynamic_size = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
int64_t static_size = 1;
for (int64_t i = 0; i < operand->shape().rank(); i++) {
HloInstruction* dynamic_dim_size =
parent_->GetDynamicSize(operand, {}, i);
if (dynamic_dim_size == nullptr) {
static_size *= operand->shape().dimensions(i);
} else {
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
dynamic_dim_size));
}
}
HloInstruction* static_size_hlo =
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(static_size)));
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
static_size_hlo));
int64_t size_without_inferred_dim =
ShapeUtil::ElementsIn(hlo->shape()) /
hlo->shape().dimensions(hlo->inferred_dimension());
HloInstruction* size_without_inferred_dim_hlo =
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(size_without_inferred_dim)));
dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
size_without_inferred_dim_hlo));
dynamic_sizes[hlo->inferred_dimension()] = dynamic_size;
VLOG(3)
<< "Need to decompose a dynamic reshape to flatten-unflatten pair. "
<< comp->parent()->ToString();
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
return Internal(
"Need inferred dimension to be set to "
"flatten-unflatten pair. %s",
hlo->ToString());
}
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
HloInstruction* const reshape = hlo;
if (reshape->shape().rank() == 0) {
VLOG(0) << "Reshaping a dynamic dimension into a scalar, which has "
"undefined behavior when input size is 0. The offending "
"instruction is: "
<< reshape->ToString();
return absl::OkStatus();
}
auto iter = reshape_group_pairs.find(input_dynamic_dimension);
CHECK(iter != reshape_group_pairs.end());
ReshapeGroupPair reshape_group_pair = iter->second;
auto output_dim_start = reshape_group_pair.second.first,
output_dim_end = reshape_group_pair.second.second;
int64_t output_dynamic_dimension = -1;
if (operand->shape().dimensions(input_dynamic_dimension) == 1) {
if (input_dynamic_dimension == 0) {
output_dynamic_dimension = 0;
} else if (input_dynamic_dimension == operand->shape().rank() - 1) {
output_dynamic_dimension = reshape->shape().rank() - 1;
}
if (output_dynamic_dimension == -1) {
return Unimplemented(
"Dynamic degenerated dimension that's not most-minor nor "
"most-major is not supported %s",
reshape->ToString());
}
}
if (output_dynamic_dimension == -1 &&
output_dim_end - output_dim_start == 1) {
output_dynamic_dimension = output_dim_start;
}
if (output_dynamic_dimension == -1 &&
output_dim_end - output_dim_start > 1) {
output_dynamic_dimension = reshape->inferred_dimension();
if (output_dynamic_dimension == -1) {
for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
if (reshape->shape().is_dynamic_dimension(i)) {
output_dynamic_dimension = i;
}
}
}
if (output_dynamic_dimension == -1) {
std::vector<int64_t> output_non_degenerated;
for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
if (reshape->shape().dimensions(i) != 1) {
output_non_degenerated.push_back(i);
}
}
if (output_non_degenerated.size() == 1) {
output_dynamic_dimension = output_non_degenerated[0];
}
}
if (output_dynamic_dimension == -1 &&
find_reverse_past_reshape(
hlo->mutable_operand(0), reshape_group_pair,
hlo->mutable_operand(0)->shape().dimensions(
input_dynamic_dimension))) {
return absl::OkStatus();
}
if (output_dynamic_dimension == -1) {
return InvalidArgument(
"Reshape's input dynamic dimension is decomposed into "
"multiple output dynamic dimensions, but the constraint is "
"ambiguous and XLA can't infer the output dimension %s. ",
hlo->ToString());
}
}
CHECK_NE(output_dynamic_dimension, -1);
const int64_t input_dim_size =
operand->shape().dimensions(input_dynamic_dimension);
const int64_t output_dim_size =
reshape->shape().dimensions(output_dynamic_dimension);
VLOG(2) << "input_dim_size: " << input_dim_size
<< " output_dim_size: " << output_dim_size;
if (input_dim_size == output_dim_size) {
dynamic_sizes[output_dynamic_dimension] = operand_dynamic_size;
}
if (input_dim_size > output_dim_size) {
TF_RET_CHECK(input_dim_size % output_dim_size == 0)
<< reshape->ToString();
const int64_t divisor = input_dim_size / output_dim_size;
HloInstruction* divisor_hlo =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(divisor)));
HloInstruction* new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
operand_dynamic_size->shape(), HloOpcode::kDivide,
operand_dynamic_size, divisor_hlo));
dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
}
if (input_dim_size < output_dim_size) {
HloInstruction* output_dynamic_size =
dynamic_sizes[output_dynamic_dimension];
if (output_dynamic_size == nullptr) {
output_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(output_dim_size)));
}
HloInstruction* divisor_hlo = hlo->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(input_dynamic_dimension))));
HloInstruction* new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
output_dynamic_size->shape(), HloOpcode::kDivide,
output_dynamic_size, divisor_hlo));
new_dynamic_size =
hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
output_dynamic_size->shape(), HloOpcode::kMultiply,
new_dynamic_size, operand_dynamic_size));
dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
}
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, dynamic_sizes);
return absl::OkStatus();
}
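// Each dynamic input dimension of a reduce-window is pushed through
// GetWindowedOutputSize with VALID padding unless the window is trivial
// on that dimension, and the resulting size is recorded on every array
// leaf of the (possibly variadic) result.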
absl::Status DynamicDimensionInferenceVisitor::HandleReduceWindow(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
hlo->shape());
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
const WindowDimension& window_dim =
reduce_window->window().dimensions(dimension);
if (operand_index >= reduce_window->input_count()) {
return absl::OkStatus();
}
if (!window_util::IsTrivialWindowDimension(window_dim)) {
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_VALID);
dynamic_size = dynamic_window_dims.output_size;
}
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, ShapeIndex reduce_window_result_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(),
reduce_window_result_index)) {
return;
}
auto* leaf_dynamic_sizes =
dynamic_sizes.mutable_element(reduce_window_result_index);
leaf_dynamic_sizes->resize(subshape.rank(), nullptr);
(*leaf_dynamic_sizes)[dimension] = dynamic_size;
});
return absl::OkStatus();
}));
dynamic_sizes.ForEachElement(
[&](const ShapeIndex& shape_index,
const absl::InlinedVector<HloInstruction*, 2> sizes) {
if (sizes.empty()) {
return;
}
SetDynamicSizes(hlo, shape_index, sizes);
});
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleSelectAndScatter(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
if (operand_index == 1) {
return absl::OkStatus();
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
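// For a strided slice the static output size is CeilOfRatio(limit - start,
// stride), and the dynamic size is adjusted the same way:
//   d' = ((d - start) + stride - 1) / stride.
// Example: start=2, stride=3 and dynamic size d=8 give d' = (6 + 2) / 3
// = 2. Size-1 result dimensions are necessarily static and are skipped.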
absl::Status DynamicDimensionInferenceVisitor::HandleSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex , int64_t dimension,
int64_t ,
HloInstruction* dynamic_size) -> absl::Status {
int64_t start = hlo->slice_starts(dimension);
int64_t limit = hlo->slice_limits(dimension);
int64_t stride = hlo->slice_strides(dimension);
int64_t size = CeilOfRatio<int64_t>(limit - start, stride);
if (size == 1) {
TF_RET_CHECK(!hlo->shape().is_dynamic_dimension(dimension));
return absl::OkStatus();
}
TF_RET_CHECK(hlo->shape().is_dynamic_dimension(dimension));
if (start != 0) {
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kSubtract, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(start)))));
}
if (stride != 1) {
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kAdd, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(stride - 1)))));
dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(stride)))));
}
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
if (hlo->shape().dimensions(dimension) == 1) {
return absl::OkStatus();
}
if (hlo->shape().dimensions(dimension) !=
hlo->operand(0)->shape().dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension propagation on DynamicSlice where a partial "
"dimension is selected %s",
hlo->ToString());
}
TF_RET_CHECK(operand_index == 0);
TF_RET_CHECK(index.empty());
SetDynamicSize(hlo, {}, dimension, dynamic_size);
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicUpdateSlice(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
hlo->shape().rank(), nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(index.empty());
if (hlo->shape().dimensions(dimension) !=
hlo->operand(0)->shape().dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension propagation on DynamicUpdateSlice where a "
"partial dimension is selected %s",
hlo->ToString());
}
if (operand_index == 1 &&
hlo->operand(1)->shape().dimensions(dimension) <
hlo->operand(0)->shape().dimensions(dimension)) {
hlo->mutable_shape()->set_dynamic_dimension(dimension, false);
return absl::OkStatus();
}
output_dynamic_sizes[dimension] = dynamic_size;
return absl::OkStatus();
}));
SetDynamicSizes(hlo, {}, output_dynamic_sizes);
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleReverse(
HloInstruction* hlo) {
return PassThroughDynamicDimension(hlo);
}
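// Gather propagation distinguishes the two operands. On the data input, a
// size-1 slice needs no propagation, and a slice covering the whole
// dimension maps the dynamic size to the matching offset dimension of the
// output; partial dynamic slices are unimplemented. A dynamic batch
// dimension of the indices maps to the corresponding non-offset output
// dimension.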
absl::Status DynamicDimensionInferenceVisitor::HandleGather(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
hlo->shape().rank(), nullptr);
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex ,
int64_t input_dynamic_dimension, int64_t operand_index,
HloInstruction* dynamic_size) -> absl::Status {
const GatherDimensionNumbers& gather_dims =
hlo->gather_dimension_numbers();
if (operand_index == 0) {
if (hlo->gather_slice_sizes()[input_dynamic_dimension] == 1) {
return absl::OkStatus();
}
if (hlo->gather_slice_sizes()[input_dynamic_dimension] ==
operand->shape().dimensions(input_dynamic_dimension)) {
int64_t operand_dimension = 0;
for (int64_t output_dimension : gather_dims.offset_dims()) {
TF_RET_CHECK(output_dimension < hlo->shape().rank());
while (operand_dimension < operand->shape().rank() &&
absl::c_linear_search(gather_dims.collapsed_slice_dims(),
operand_dimension)) {
++operand_dimension;
}
TF_RET_CHECK(operand_dimension < operand->shape().rank());
if (operand_dimension == input_dynamic_dimension) {
output_dynamic_sizes[output_dimension] = dynamic_size;
return absl::OkStatus();
}
++operand_dimension;
}
return Internal("Invalid instruction: %s", hlo->ToString());
}
return Unimplemented(
"Detects a dynamic dimension on the data input of gather, which "
"is not supported: %s, %lld",
hlo->ToString(), input_dynamic_dimension);
}
int64_t indices_rank = hlo->operand(1)->shape().rank();
if (gather_dims.index_vector_dim() == indices_rank) {
++indices_rank;
}
int64_t output_rank = hlo->shape().rank();
int64_t indices_dim = 0;
for (int64_t output_dim = 0; output_dim < output_rank; ++output_dim) {
if (!absl::c_linear_search(gather_dims.offset_dims(), output_dim)) {
if (indices_dim == gather_dims.index_vector_dim()) {
indices_dim++;
}
if (indices_dim++ == input_dynamic_dimension) {
output_dynamic_sizes[output_dim] = dynamic_size;
return absl::OkStatus();
}
}
}
CHECK(indices_dim == indices_rank);
return Unimplemented(
"Detects a non-batch dynamic dimension of gather, "
"which is not supported: %s",
hlo->ToString());
}));
SetDynamicSizes(hlo, {}, output_dynamic_sizes);
return absl::OkStatus();
}
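// Conditionals are rewritten so dynamic sizes flow through the branches
// explicitly: each branch's parameter tuple is widened with the dynamic
// sizes it needs, inference is re-run on the widened branch computation,
// and dynamic sizes produced by branch roots are appended to a widened
// result tuple, from which get-tuple-elements recover them for callers.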
absl::Status DynamicDimensionInferenceVisitor::HandleConditional(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
std::vector<HloComputation*> new_branch_computations;
std::vector<HloInstruction*> new_operands;
ShapeTree<absl::flat_hash_map<int64_t, int64_t>> dynamic_output_mapping(
hlo->shape());
bool need_rewrite = false;
for (int64_t branch_index = 0; branch_index < hlo->branch_count();
++branch_index) {
std::vector<HloInstruction*> operands_to_add;
absl::flat_hash_map<HloInstruction*, int64_t>
dynamic_size_to_operand_id_index_map;
const int64_t operand_index = branch_index + 1;
int operand_count =
hlo->operand(operand_index)->shape().tuple_shapes_size();
TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
hlo, operand_index,
[&](HloInstruction*, ShapeIndex, int64_t, int64_t,
HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(hlo->operand(operand_index)->shape().IsTuple())
<< "Only tuple typed inputs can have dynamic dimension. Please "
"file a bug against XLA team.";
const HloInstruction* tuple_operand = hlo->operand(operand_index);
for (int64_t i = 0; i < tuple_operand->operand_count(); ++i) {
if (dynamic_size == tuple_operand->operand(i)) {
dynamic_size_to_operand_id_index_map[dynamic_size] = i;
return absl::OkStatus();
}
}
auto iter = dynamic_size_to_operand_id_index_map.find(dynamic_size);
if (iter == dynamic_size_to_operand_id_index_map.end()) {
operands_to_add.push_back(dynamic_size);
dynamic_size_to_operand_id_index_map[dynamic_size] =
operand_count++;
}
return absl::OkStatus();
}));
HloInstruction* original_input = hlo->mutable_operand(operand_index);
HloComputation* branch_computation = hlo->branch_computation(branch_index);
HloComputation* new_computation = branch_computation;
CallInliner::InlinedInstructionMap inline_map;
HloInstruction* new_operand = hlo->mutable_operand(operand_index);
Shape new_param_shape =
branch_computation->parameter_instruction(0)->shape();
if (!operands_to_add.empty()) {
TF_RET_CHECK(original_input->shape().IsTuple());
need_rewrite = true;
new_operand = TupleUtil::AppendSuffix(original_input, operands_to_add);
for (HloInstruction* operand : operands_to_add) {
ShapeUtil::AppendShapeToTuple(operand->shape(), &new_param_shape);
}
TF_ASSIGN_OR_RETURN(
std::tie(new_computation, inline_map),
WidenComputation(branch_computation, new_param_shape));
}
DynamicParameterBinding dynamic_parameter_binding;
TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
hlo, operand_index,
[&](HloInstruction*, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
DynamicParameterBinding::DynamicSizeParameter dynamic_parameter{
0, {dynamic_size_to_operand_id_index_map[dynamic_size]}};
DynamicParameterBinding::DynamicDimension dynamic_dimension{
0, {index}, dimension};
TF_RETURN_IF_ERROR(dynamic_parameter_binding.Bind(dynamic_parameter,
dynamic_dimension));
return absl::OkStatus();
}));
VLOG(2) << "dynamic_parameter_binding for conditional branch"
<< dynamic_parameter_binding;
    for (auto [old_inst, new_inst] : inline_map) {
      parent_->CopyMapping(/*from=*/old_inst, /*to=*/new_inst,
                           /*dynamic_size_map=*/&inline_map);
    }
TF_ASSIGN_OR_RETURN(
bool changed,
DynamicDimensionInferenceVisitor::Run(
new_computation, dataflow_analysis_, dynamic_parameter_binding,
parent_, custom_call_handler_, shape_check_mode_,
assertion_generator_));
if (changed) {
MarkAsChanged();
}
new_branch_computations.push_back(new_computation);
new_operands.push_back(new_operand);
}
int tuple_count = hlo->shape().tuple_shapes_size();
ShapeUtil::ForEachSubshape(
hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
for (int64_t j = 0; j < new_branch_computations.size(); ++j) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
new_branch_computations[j]->root_instruction(), index, i);
if (dynamic_size) {
if (dynamic_output_mapping.element(index).contains(i)) {
continue;
}
dynamic_output_mapping.mutable_element(index)->emplace(
i, tuple_count++);
}
}
}
});
for (int64_t branch_index = 0; branch_index < hlo->branch_count();
++branch_index) {
std::vector<HloInstruction*> hlos_to_add_in_root;
ShapeUtil::ForEachSubshape(
hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (dynamic_output_mapping.element(index).contains(i)) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
new_branch_computations[branch_index]->root_instruction(),
index, i);
if (dynamic_size) {
hlos_to_add_in_root.push_back(dynamic_size);
} else {
HloInstruction* constant_size =
new_branch_computations[branch_index]->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(
subshape.dimensions(i))));
hlos_to_add_in_root.push_back(constant_size);
}
}
}
});
VLOG(2) << "hlos_to_add_in_root:" << hlos_to_add_in_root.size();
if (!hlos_to_add_in_root.empty()) {
need_rewrite = true;
HloInstruction* new_branch_root = TupleUtil::AppendSuffix(
new_branch_computations[branch_index]->root_instruction(),
hlos_to_add_in_root);
new_branch_computations[branch_index]->set_root_instruction(
new_branch_root,
true);
}
}
if (!need_rewrite) {
return absl::OkStatus();
}
HloInstruction* new_conditional =
hlo->parent()->AddInstruction(HloInstruction::CreateConditional(
new_branch_computations[0]->root_instruction()->shape(),
hlo->mutable_operand(0), new_branch_computations, new_operands));
HloInstruction* new_conditional_extracted = TupleUtil::ExtractPrefix(
new_conditional, hlo->shape().tuple_shapes_size());
dynamic_output_mapping.ForEachElement(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_output) {
for (auto iter : dim_to_output) {
int64_t dim = iter.first;
int64_t output_index = iter.second;
HloInstruction* dynamic_size = hlo->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeScalarShape(S32), new_conditional,
output_index));
          SetDynamicSize(new_conditional, index, dim, dynamic_size,
                         /*clear_dynamic_dimension=*/false);
          SetDynamicSize(new_conditional_extracted, index, dim, dynamic_size,
                         /*clear_dynamic_dimension=*/false);
}
});
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_conditional_extracted));
TF_RETURN_IF_ERROR(hlo->parent()->RemoveInstruction(hlo));
SetVisited(*new_conditional);
SetVisited(*new_conditional_extracted);
MarkAsChanged();
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::HandleMap(HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return HandleElementwiseNary(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::HandleScatter(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
return ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex dynamic_index, int64_t dimension,
int64_t operand_index,
HloInstruction* operand_dynamic_size) -> absl::Status {
if (operand_index == 0) {
SetDynamicSize(hlo, {}, dimension, operand_dynamic_size);
return absl::OkStatus();
}
const ScatterDimensionNumbers& scatter_dims =
hlo->scatter_dimension_numbers();
if (operand_index == 2 &&
absl::c_linear_search(scatter_dims.update_window_dims(),
dimension)) {
std::vector<int64_t> update_window_dims_in_operand;
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (absl::c_linear_search(scatter_dims.inserted_window_dims(), i)) {
continue;
}
update_window_dims_in_operand.push_back(i);
}
for (int64_t i = 0; i < scatter_dims.update_window_dims_size(); ++i) {
if (scatter_dims.update_window_dims(i) == dimension) {
const Shape& operand_shape = hlo->operand(0)->shape();
const Shape& update_shape = hlo->operand(2)->shape();
int64_t dim_in_operand = update_window_dims_in_operand[i];
if (operand_shape.dimensions(dim_in_operand) !=
update_shape.dimensions(dimension)) {
return Unimplemented(
"Dynamic dimension of update window dims that are not the "
"same as corresponding operand dim is not supported: "
"%s : %d : %d : %d",
hlo->ToString(), i, update_shape.dimensions(dimension),
operand_shape.dimensions(dim_in_operand));
}
HloInstruction* base_dynamic_size = parent_->GetDynamicSize(
hlo->mutable_operand(0), {}, dim_in_operand);
if (base_dynamic_size == nullptr ||
!operand_shape.is_dynamic_dimension(dim_in_operand)) {
return absl::OkStatus();
}
if (base_dynamic_size != operand_dynamic_size) {
return Unimplemented(
"Dynamic dimension size of update window dims that are not "
"the same as corresponding operand dim is not supported: "
"%s.\n Dynamic dim size of base: %s, dynamic dim size of "
"update: %s",
hlo->ToString(), base_dynamic_size->ToString(),
operand_dynamic_size->ToString());
}
}
}
}
return absl::OkStatus();
});
}
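// While loops are handled analogously to conditionals: the dynamic sizes
// of the loop operand are appended to the loop-carried tuple via
// WhileUtil::MakeInstructionsLiveIn, inference is re-run on the widened
// body and condition, and the body root is rebuilt so the trailing tuple
// elements carry the (possibly updated) dynamic sizes out of the loop.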
absl::Status DynamicDimensionInferenceVisitor::HandleWhile(
HloInstruction* hlo) {
if (!CanInfer(hlo)) {
return absl::OkStatus();
}
Shape original_shape = hlo->shape();
ShapeTree<absl::flat_hash_map<int64_t, int64_t>> dynamic_output_mapping(
original_shape);
std::vector<HloInstruction*> operands_to_add;
const int original_tuple_count = original_shape.tuple_shapes_size();
int operand_count = original_tuple_count;
DynamicParameterBinding binding_for_while;
TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
hlo,
[&](HloInstruction* operand, ShapeIndex index, int64_t dim,
int64_t operand_num, HloInstruction* dynamic_size) -> absl::Status {
TF_RET_CHECK(operand_num == 0);
operands_to_add.push_back(dynamic_size);
dynamic_output_mapping.mutable_element(index)->emplace(dim,
operand_count);
        DynamicParameterBinding::DynamicDimension dynamic_dimension{
            /*parameter_num=*/0,
            /*parameter_index=*/index,
            /*dimension=*/dim,
        };
        DynamicParameterBinding::DynamicSizeParameter dynamic_size_param{
            /*parameter_num=*/0,
            /*parameter_index=*/{operand_count},
        };
TF_RETURN_IF_ERROR(
binding_for_while.Bind(dynamic_size_param, dynamic_dimension));
++operand_count;
return absl::OkStatus();
}));
if (operands_to_add.empty()) {
return absl::OkStatus();
}
HloInstruction* old_tuple_operand = hlo->mutable_operand(0);
HloInstruction* old_body_root = hlo->while_body()->root_instruction();
TF_ASSIGN_OR_RETURN(WhileUtil::MakeInstructionsLiveInResult result,
WhileUtil::MakeInstructionsLiveIn(hlo, operands_to_add));
TF_RET_CHECK(result.replacement_instr->opcode() == HloOpcode::kTuple);
HloInstruction* new_tuple_operand =
result.new_while_instr->mutable_operand(0);
parent_->CopyMapping(old_tuple_operand,
new_tuple_operand);
hlo = result.new_while_instr;
SetVisited(*hlo);
  for (auto [old_inst, new_inst] : result.while_body_instruction_map) {
    parent_->CopyMapping(
        /*from=*/old_inst, /*to=*/new_inst,
        /*dynamic_size_map=*/&result.while_body_instruction_map);
  }
parent_->CopyMapping(old_body_root,
hlo->while_body()->root_instruction(),
&result.while_body_instruction_map);
  for (auto [old_inst, new_inst] : result.while_condition_instruction_map) {
    parent_->CopyMapping(
        /*from=*/old_inst, /*to=*/new_inst,
        /*dynamic_size_map=*/&result.while_condition_instruction_map);
  }
TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
hlo->while_body(), dataflow_analysis_,
binding_for_while, parent_, custom_call_handler_,
shape_check_mode_, assertion_generator_)
.status());
TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
hlo->while_condition(), dataflow_analysis_,
binding_for_while, parent_, custom_call_handler_,
shape_check_mode_, assertion_generator_)
.status());
HloInstruction* body_root = hlo->while_body()->root_instruction();
std::vector<HloInstruction*> new_root_operands(body_root->operand_count(),
nullptr);
for (int i = 0; i < original_tuple_count; ++i) {
new_root_operands[i] =
body_root->AddInstruction(HloInstruction::CreateGetTupleElement(
body_root->shape().tuple_shapes(i), body_root, i));
}
TF_RETURN_IF_ERROR(dynamic_output_mapping.ForEachElementWithStatus(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
-> absl::Status {
for (auto [dimension, output_index] : dim_to_size) {
TF_RET_CHECK(new_root_operands[output_index] == nullptr);
HloInstruction* dynamic_size =
parent_->GetDynamicSize(body_root, index, dimension);
TF_RET_CHECK(dynamic_size != nullptr);
new_root_operands[output_index] = dynamic_size;
}
return absl::OkStatus();
}));
for (auto operand : new_root_operands) {
TF_RET_CHECK(operand != nullptr);
}
HloInstruction* new_body_root = hlo->while_body()->AddInstruction(
HloInstruction::CreateTuple(new_root_operands));
for (int i = 0; i < original_tuple_count; ++i) {
TF_RETURN_IF_ERROR(ForEachDynamicDimension(
body_root,
[&](ShapeIndex index, int64_t dimension,
HloInstruction* dynamic_size) -> absl::Status {
SetDynamicSize(new_body_root, index, dimension, dynamic_size);
if (index.empty() || index.front() != i) {
return absl::OkStatus();
}
index.pop_front();
SetDynamicSize(new_root_operands[i], index, dimension, dynamic_size);
return absl::OkStatus();
}));
}
hlo->while_body()->set_root_instruction(new_body_root);
MarkAsChanged();
return dynamic_output_mapping.ForEachElementWithStatus(
[&](const ShapeIndex& index,
const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
-> absl::Status {
for (auto [dimension, output_index] : dim_to_size) {
HloInstruction* dynamic_size = hlo->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, output_index));
SetDynamicSize(result.replacement_instr, index, dimension,
dynamic_size);
ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index)
->set_dynamic_dimension(dimension, false);
TF_RET_CHECK(!index.empty());
HloInstruction* gte =
result.replacement_instr->mutable_operand(index.front());
TF_RET_CHECK(gte->opcode() == HloOpcode::kGetTupleElement);
TF_RET_CHECK(gte->operand(0) == hlo);
ShapeUtil::GetMutableSubshape(gte->mutable_shape(),
ShapeIndexView(index).subspan(1))
->set_dynamic_dimension(dimension, false);
}
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleParameter(
HloInstruction* hlo) {
if (hlo->parent()->IsEntryComputation()) {
TF_RET_CHECK(param_bindings_.empty());
return InsertPadToStaticOnInstruction(hlo);
}
return param_bindings_.ForEachBinding(
[&](const DynamicParameterBinding::DynamicSizeParameter& dynamic_size,
const DynamicParameterBinding::DynamicDimension& dynamic_dimension)
-> absl::Status {
if (dynamic_dimension.parameter_num == hlo->parameter_number()) {
SetDynamicSize(
hlo, dynamic_dimension.parameter_index,
dynamic_dimension.dimension,
TupleUtil::AddGetTupleElements(HloPosition{
hlo->parent()->parameter_instruction(
dynamic_size.parameter_num),
dynamic_size.parameter_index,
}));
}
return absl::OkStatus();
});
}
absl::Status DynamicDimensionInferenceVisitor::HandleInfeed(
HloInstruction* hlo) {
return InsertPadToStaticOnInstruction(hlo);
}
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimension(
HloInstruction* inst, const DynamicDimensionFn& fn) {
auto iter = parent_->per_hlo_dynamic_dimensions_.find(inst);
if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
TF_RETURN_IF_ERROR(
fn(dynamic_dimension.index, dynamic_dimension.dim, dynamic_size));
}
}
return absl::OkStatus();
}
absl::StatusOr<bool> DynamicDimensionInferenceVisitor::RequiresPadToStatic(
HloInstruction* instr, ShapeIndex shape_index) {
TF_RET_CHECK(ShapeUtil::IsLeafIndex(instr->shape(), shape_index))
<< instr->shape() << " @ " << shape_index;
if (ShapeUtil::GetSubshape(instr->shape(), shape_index).is_static()) {
return false;
}
auto uses =
dataflow_analysis_.GetValueDefinedAt(instr, shape_index).GetUses();
for (const auto& use : uses) {
if (use.instruction->opcode() == HloOpcode::kAsyncStart ||
use.instruction->opcode() == HloOpcode::kAsyncUpdate ||
use.instruction->opcode() == HloOpcode::kAsyncDone ||
use.instruction->opcode() == HloOpcode::kCall ||
use.instruction->opcode() == HloOpcode::kTuple ||
use.instruction->opcode() == HloOpcode::kGetTupleElement ||
use.instruction->opcode() == HloOpcode::kConditional) {
continue;
}
if (use.instruction->opcode() == HloOpcode::kWhile) {
TF_RET_CHECK(use.operand_number == 0);
HloInstruction* root = use.instruction->while_body()->root_instruction();
if (parent_->HasDynamicDimension(root, use.operand_index)) {
return true;
}
continue;
}
if (use.instruction->opcode() == HloOpcode::kSetDimensionSize) {
TF_RET_CHECK(use.operand_number == 0);
return true;
}
if (use.instruction->opcode() == HloOpcode::kGetDimensionSize) {
return true;
}
if (use.instruction->opcode() != HloOpcode::kCustomCall ||
use.instruction->custom_call_target() != "PadToStatic") {
if (parent_->op_supports_dynamism_handler_ == nullptr) {
return true;
}
if (parent_->op_supports_dynamism_handler_(use.instruction) ==
OpDynamismSupport::kNoSupport) {
return true;
}
}
}
return false;
}
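// Converts a dynamically shaped instruction to static form by inserting a
// "PadToStatic" custom call on every array leaf that requires one. Each
// call returns a tuple of (statically padded data, one S32 size per
// dimension); the sizes are recorded as the dynamic sizes of the padded
// data, and the reassembled tuple replaces all uses of the original
// instruction.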
absl::Status DynamicDimensionInferenceVisitor::InsertPadToStaticOnInstruction(
HloInstruction* inst) {
if (inst->shape().is_static()) {
return absl::OkStatus();
}
ShapeTree<bool> needs_pad(inst->shape(), false);
bool any_needs_pad = false;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
inst->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {
if (subshape.IsTuple()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool do_pad,
RequiresPadToStatic(inst, shape_index));
if (do_pad) {
*needs_pad.mutable_element(shape_index) = true;
any_needs_pad = true;
}
return absl::OkStatus();
}));
if (!any_needs_pad) {
return absl::OkStatus();
}
auto users = inst->users();
ShapeTree<HloInstruction*> gtes =
TupleUtil::DisassembleTupleInstruction(inst);
ShapeTree<HloInstruction*> padded(inst->shape(), nullptr);
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapePostOrderWithStatus(
inst->shape(),
[&](const Shape& subshape,
const ShapeIndex& shape_index) -> absl::Status {
HloInstruction* element = gtes.element(shape_index);
SetVisited(*gtes.element(shape_index));
if (subshape.IsTuple()) {
absl::InlinedVector<HloInstruction*, 2> children;
ShapeIndex child_index = shape_index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(padded.element(child_index));
child_index.pop_back();
}
HloInstruction* tuple =
element->AddInstruction(HloInstruction::CreateVariadic(
subshape, HloOpcode::kTuple, children));
TF_CHECK_OK(ForEachOperandDynamicDimension(
tuple,
[&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
int64_t operand_index, HloInstruction* dynamic_size) {
index.push_front(operand_index);
SetDynamicSize(tuple, index, dimension, dynamic_size);
return absl::OkStatus();
}));
*padded.mutable_element(shape_index) = tuple;
return absl::OkStatus();
}
if (needs_pad.element(shape_index)) {
Shape data_output_shape =
ShapeUtil::MakeStaticShape(element->shape());
Shape output_shape = ShapeUtil::MakeTupleShape({data_output_shape});
for (int64_t i = 0; i < element->shape().rank(); ++i) {
ShapeUtil::AppendShapeToTuple(ShapeUtil::MakeScalarShape(S32),
&output_shape);
}
HloInstruction* pad_to_static = inst->parent()->AddInstruction(
HloInstruction::CreateCustomCall(output_shape, {element},
"PadToStatic"),
absl::StrCat(element->name(), ".padded"));
SetVisited(*pad_to_static);
HloInstruction* data_output = inst->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(data_output_shape,
pad_to_static, 0),
absl::StrCat(element->name(), ".data"));
SetVisited(*data_output);
for (int64_t i = 0; i < element->shape().rank(); ++i) {
if (!element->shape().is_dynamic_dimension(i)) {
continue;
}
HloInstruction* dynamic_size_output =
inst->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
output_shape.tuple_shapes(i + 1), pad_to_static, i + 1),
absl::StrCat(element->name(), ".size"));
SetVisited(*dynamic_size_output);
                SetDynamicSize(data_output, {}, i, dynamic_size_output,
                               /*clear_dynamic_dimension=*/false);
}
*padded.mutable_element(shape_index) = data_output;
} else {
*padded.mutable_element(shape_index) = element;
}
return absl::OkStatus();
}));
HloInstruction* result = padded.element({});
for (auto user : users) {
for (int64_t i : user->OperandIndices(inst)) {
TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, result));
}
}
if (inst->IsRoot()) {
inst->parent()->set_root_instruction(result);
}
MarkAsChanged();
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::InsertShapeCheck(
HloInstruction* dim1, HloInstruction* dim2,
bool support_implicit_broadcast) {
switch (shape_check_mode_) {
case DynamicDimensionInference::kIgnore:
return absl::OkStatus();
case DynamicDimensionInference::kCompileTime:
return InvalidArgument(
"Fail to proof the equality of two dimensions at compile time: "
"%s vs %s",
dim1->ToString(), dim2->ToString());
case DynamicDimensionInference::kRuntime: {
TF_ASSIGN_OR_RETURN(
HloInstruction * assertion,
MakeCompareHlo(Comparison::Direction::kEq, dim1, dim2));
if (shape_assertion_ == nullptr) {
shape_assertion_ = assertion;
} else {
TF_ASSIGN_OR_RETURN(
shape_assertion_,
MakeBinaryHlo(HloOpcode::kAnd, shape_assertion_, assertion));
}
return absl::OkStatus();
}
default:
LOG(FATAL) << "Unreachable";
}
}
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimensionInOperand(
HloInstruction* inst, int64_t operand_index, OperandDynamicDimensionFn fn) {
auto iter =
parent_->per_hlo_dynamic_dimensions_.find(inst->operand(operand_index));
if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size = parent_->GetDynamicSize(
dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
TF_RETURN_IF_ERROR(fn(dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim, operand_index,
dynamic_size));
}
}
return absl::OkStatus();
}
absl::Status DynamicDimensionInferenceVisitor::ForEachOperandDynamicDimension(
HloInstruction* inst, OperandDynamicDimensionFn fn) {
for (int64_t operand_index = 0; operand_index < inst->operand_count();
++operand_index) {
TF_RETURN_IF_ERROR(
ForEachDynamicDimensionInOperand(inst, operand_index, fn));
}
return absl::OkStatus();
}
void DynamicDimensionInference::SetDynamicSize(HloInstruction* inst,
const ShapeIndex& index,
int64_t dim,
HloInstruction* size) {
CHECK_NE(inst, nullptr);
CHECK_NE(size, nullptr);
VLOG(1) << "Set dimension inst " << inst->ToString() << " index "
<< index.ToString() << "@" << dim << " to " << size->ToShortString();
const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
CHECK(!subshape.IsTuple()) << "Can't set a tuple shape to dynamic dimension";
CHECK(dim < subshape.rank() && dim >= 0)
<< "Asked to set invalid dynamic dimension. Shape: "
<< subshape.ToString() << ", Dimension: " << dim;
DynamicDimension dynamic_dimension{inst, index, dim};
auto [it, inserted] = dynamic_mapping_.try_emplace(dynamic_dimension, size);
if (!inserted) {
CHECK_EQ(size, it->second) << "old: " << it->second->ToShortString()
<< ", new: " << size->ToShortString();
}
auto iter = per_hlo_dynamic_dimensions_.try_emplace(inst);
iter.first->second.emplace(dynamic_dimension);
}
void DynamicDimensionInference::CopyMapping(
HloInstruction* from, HloInstruction* to,
const absl::flat_hash_map<HloInstruction*, HloInstruction*>*
dynamic_size_map) {
auto iter = per_hlo_dynamic_dimensions_.find(from);
if (iter != per_hlo_dynamic_dimensions_.end()) {
for (auto& dynamic_dimension : iter->second) {
HloInstruction* dynamic_size =
GetDynamicSize(dynamic_dimension.inst, dynamic_dimension.index,
dynamic_dimension.dim);
if (dynamic_size_map != nullptr) {
dynamic_size = dynamic_size_map->at(dynamic_size);
}
SetDynamicSize(to, dynamic_dimension.index, dynamic_dimension.dim,
dynamic_size);
}
}
}
absl::StatusOr<DynamicDimensionInference> DynamicDimensionInference::Run(
HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
CustomCallInferenceHandler custom_call_handler,
ShapeCheckMode shape_check_mode,
const AssertionGenerator& assertion_generator,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
DynamicDimensionInference inference(
module, std::move(op_supports_dynamism_handler),
std::move(custom_call_handler), shape_check_mode, assertion_generator,
execution_threads);
TF_RETURN_IF_ERROR(inference.AnalyzeDynamicDimensions());
return std::move(inference);
}
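// Minimal usage sketch (assumes the header declares defaults for the trailing
// parameters, as the five-argument call in the tests below suggests):
//   TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,
//                       DynamicDimensionInference::Run(module));
//   HloInstruction* size = inference.GetDynamicSize(inst, /*index=*/{}, 0);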
std::string DynamicDimensionInference::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("DynamicDimensionInference: ");
for (const auto& mapping : dynamic_mapping_) {
const DynamicDimension& dynamic_dimension = mapping.first;
pieces.push_back(absl::StrFormat(
" -- instruction %s at %s has dim %lld as dynamic"
" dimension, which is represented by instruction %s",
dynamic_dimension.inst->ToString(), dynamic_dimension.index.ToString(),
dynamic_dimension.dim, mapping.second->ToString()));
}
return absl::StrJoin(pieces, "\n");
}
DynamicDimensionInference::DynamicDimensionInference(
HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
CustomCallInferenceHandler custom_call_handler,
ShapeCheckMode shape_check_mode, AssertionGenerator assertion_generator,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: module_(module),
op_supports_dynamism_handler_(std::move(op_supports_dynamism_handler)),
custom_call_handler_(std::move(custom_call_handler)),
shape_check_mode_(shape_check_mode),
assertion_generator_(assertion_generator),
execution_threads_(execution_threads) {}
absl::Status DynamicDimensionInference::AnalyzeDynamicDimensions() {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
      HloDataflowAnalysis::Run(*module_, /*ssa_form=*/false,
                               /*bitcast_defines_value=*/true,
                               /*can_share_buffer=*/nullptr,
                               /*forwards_value=*/nullptr, execution_threads_));
for (HloComputation* computation : module_->MakeComputationPostOrder()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool changed,
DynamicDimensionInferenceVisitor::Run(
computation, *dataflow_analysis, {}, this, custom_call_handler_,
shape_check_mode_, assertion_generator_));
changed_ |= changed;
}
return absl::OkStatus();
}
void DynamicDimensionInference::ReplaceAllDynamicDimensionUsesWith(
HloInstruction* replace, HloInstruction* with) {
CHECK(Shape::Equal().IgnoreLayout()(replace->shape(),
ShapeUtil::MakeScalarShape(S32)));
CHECK(Shape::Equal().IgnoreLayout()(with->shape(),
ShapeUtil::MakeScalarShape(S32)));
for (auto& kv : dynamic_mapping_) {
if (kv.second == replace) {
kv.second = with;
}
}
}
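// Example (illustrative): both arguments must be scalar s32 per the CHECKs
// above; afterwards every dimension previously sized by `replace` reports
// `with`:
//   inference.ReplaceAllDynamicDimensionUsesWith(old_size, new_size);
//   // GetDynamicSize(inst, {}, dim) now returns new_size wherever it
//   // returned old_size before.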
absl::Status DynamicDimensionInference::ForwardDynamicSize(
HloInstruction* inst, HloInstruction* new_inst, const ShapeIndex& index) {
TF_RET_CHECK(ShapeUtil::Compatible(inst->shape(), new_inst->shape()));
for (int64_t dim = 0; dim < inst->shape().rank(); ++dim) {
DynamicDimension dynamic_dimension_new{new_inst, index, dim};
DynamicDimension dynamic_dimension{inst, index, dim};
auto iter = dynamic_mapping_.find(dynamic_dimension);
if (iter != dynamic_mapping_.end()) {
dynamic_mapping_.insert({dynamic_dimension_new, iter->second});
auto iter = per_hlo_dynamic_dimensions_.try_emplace(new_inst);
iter.first->second.emplace(dynamic_dimension_new);
}
}
return absl::OkStatus();
}
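// Example (illustrative): after cloning `inst` into `new_inst` with a
// compatible shape, forwarding copies the recorded sizes dimension by
// dimension at the given shape index:
//   TF_RETURN_IF_ERROR(
//       inference.ForwardDynamicSize(inst, new_inst, /*index=*/{}));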
bool DynamicDimensionInference::HasDynamicDimension(
HloInstruction* inst, ShapeIndexView index) const {
bool has_dynamic_dim = false;
ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
const ShapeIndex& subindex) {
if (subshape.IsTuple()) {
return;
}
if (ShapeIndexView(subindex).subspan(0, index.size()) != index) {
return;
}
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
HloInstruction* operand_dynamic_size = GetDynamicSize(inst, subindex, i);
if (operand_dynamic_size != nullptr) {
has_dynamic_dim = true;
}
}
});
return has_dynamic_dim;
}
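// Example (illustrative): for a tuple-shaped `inst` of (f32[<=4], f32[8]),
// HasDynamicDimension(inst, {0}) is true once a size is recorded for leaf {0},
// while HasDynamicDimension(inst, {1}) remains false.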
Shape DynamicDimensionInference::GetDynamicShape(HloInstruction* inst) {
Shape shape = inst->shape();
ShapeUtil::ForEachMutableSubshape(
&shape, [&](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
for (int64_t dimension = 0; dimension < subshape->rank(); ++dimension) {
if (GetDynamicSize(inst, index, dimension) != nullptr) {
subshape->set_dynamic_dimension(dimension, true);
}
}
});
return shape;
}
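// Example (illustrative): if `inst` has static shape f32[4,8] and a size is
// recorded for dimension 0, GetDynamicShape(inst) returns f32[<=4,8].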
HloInstruction* DynamicDimensionInference::GetDynamicSize(
HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
auto iter = dynamic_mapping_.find(DynamicDimension{inst, index, dim});
if (iter != dynamic_mapping_.end()) {
return iter->second;
}
return nullptr;
}
const HloInstruction* DynamicDimensionInference::GetDynamicSize(
const HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
return GetDynamicSize(const_cast<HloInstruction*>(inst), index, dim);
}
std::vector<HloInstruction*> DynamicDimensionInference::GetDynamicSizes(
HloInstruction* inst, const ShapeIndex& index) const {
CHECK(ShapeUtil::IndexIsValid(inst->shape(), index));
const int64_t rank = ShapeUtil::GetSubshape(inst->shape(), index).rank();
std::vector<HloInstruction*> result(rank, nullptr);
for (int64_t i = 0; i < rank; ++i) {
result[i] = GetDynamicSize(inst, index, i);
}
return result;
}
bool DynamicDimensionInference::CanInfer(HloInstruction* hlo) {
if (hlo->shape().is_static() && hlo->called_computations().empty() &&
hlo->opcode() != HloOpcode::kCustomCall) {
return false;
}
bool ok = true;
for (int64_t operand_index = 0; operand_index < hlo->operand_count();
++operand_index) {
ShapeUtil::ForEachSubshape(
hlo->operand(operand_index)->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t dimension = 0; dimension < subshape.rank();
++dimension) {
bool shape_is_dynamic = subshape.is_dynamic_dimension(dimension);
bool dynamic_size_recorded =
GetDynamicSize(hlo->operand(operand_index), shape_index,
dimension) != nullptr;
if (shape_is_dynamic && !dynamic_size_recorded) {
VLOG(2) << "cannot infer " << hlo->ToShortString()
<< " because operand " << operand_index << " ("
<< hlo->operand(operand_index)->ToShortString() << ")"
<< " subshape " << shape_index.ToString()
<< " is missing dynamic size for dimension " << dimension;
ok = false;
}
CHECK(hlo->operand(operand_index)->opcode() ==
HloOpcode::kSetDimensionSize ||
hlo->operand(operand_index)->opcode() ==
HloOpcode::kCustomCall ||
!shape_is_dynamic || !dynamic_size_recorded);
}
});
}
return ok;
}
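// Note (editorial): CanInfer demands a recorded size for every dynamic operand
// dimension. For example, an operand of shape f32[<=4] with no mapping entry
// makes the function return false and log the missing dimension at VLOG(2).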
} | #include "xla/service/dynamic_dimension_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DynamicDimensionInferenceTest : public HloTestBase {
protected:
DynamicDimensionInferenceTest() : HloTestBase() {
module_ = CreateNewVerifiedModule();
}
absl::Status RunInference(
OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,
DynamicDimensionInference::CustomCallInferenceHandler handler = nullptr,
DynamicDimensionInference::ShapeCheckMode shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore,
const DynamicDimensionInference::AssertionGenerator& assertion_generator =
nullptr) {
TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,
DynamicDimensionInference::Run(
module_.get(), op_supports_dynamism_handler,
handler, shape_check_mode, assertion_generator));
inference_ = std::make_unique<DynamicDimensionInference>(inference);
return absl::OkStatus();
}
HloComputation* GetAdd() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetAddTuple() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto lhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "lhs.1"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "rhs"));
auto rhs_1 =
embedded_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {}), "rhs.1"));
auto add = embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
auto add_1 = embedded_builder.AddInstruction(HloInstruction::CreateBinary(
lhs->shape(), HloOpcode::kAdd, lhs_1, rhs_1));
embedded_builder.AddInstruction(HloInstruction::CreateTuple({add, add_1}));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation* GetGe() {
auto embedded_builder = HloComputation::Builder("ge");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), lhs, rhs, ComparisonDirection::kGe));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<DynamicDimensionInference> inference_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
TEST_F(DynamicDimensionInferenceTest, ParamTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "param"));
auto param2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param"));
auto result = builder.AddInstruction(
HloInstruction::CreateSetDimensionSize(dynamic_shape, param, param2, 1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(result, {}, 1), param2);
EXPECT_EQ(inference_->GetDynamicSize(param, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(param2, {}, 0), nullptr);
}
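// The graph built above corresponds to this HLO (illustrative):
//   param = f32[1,2,2] parameter(0)
//   size = s32[] parameter(1)
//   result = f32[1,<=2,2] set-dimension-size(param, size), dimensions={1}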
TEST_F(DynamicDimensionInferenceTest, ElementwiseTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto* negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(negate, {}, 1), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestI) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, negate, init, {0, 2}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ReduceTestII) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto dynamic_param =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 2));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, dynamic_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(
HloInstruction::CreateReduce(reduce_shape, negate, init, {1}, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, VariadicReduce) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param_1 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto data_param_2 = builder.AddInstruction(
HloInstruction::CreateParameter(1, input_shape, "data_param.2"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
auto data_param_dynamic_1 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_1, size_param, 2));
auto data_param_dynamic_2 =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param_2, size_param, 2));
auto dynamic_negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_1));
auto dynamic_negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param_dynamic_2));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeTupleShape({reduce_shape, reduce_shape}),
{dynamic_negate_1, dynamic_negate_2}, {init, init}, {1}, GetAddTuple()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto yz_dynamic_shape =
ShapeUtil::MakeShape(F32, {ydim, zdim}, {true, false});
auto xz_dynamic_shape =
ShapeUtil::MakeShape(F32, {xdim, zdim}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_dynamic_shape, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
yz_dynamic_shape, b_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(xz_dynamic_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestBatch) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto rhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});
auto output_shape =
ShapeUtil::MakeShape(F32, {4, 2, 128, 128}, {true, false, false, false});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {4, 128, 2, 8}, {true, false, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 0));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(3);
dot_dnums.add_rhs_contracting_dimensions(3);
dot_dnums.add_lhs_batch_dimensions(0);
dot_dnums.add_lhs_batch_dimensions(2);
dot_dnums.add_rhs_batch_dimensions(0);
dot_dnums.add_rhs_batch_dimensions(2);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 3), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, DotTestMultiContracting) {
auto builder = HloComputation::Builder(TestName());
auto lhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 8, 64});
auto rhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 512});
auto output_shape = ShapeUtil::MakeShape(F32, {8, 64, 512});
auto lhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 8, 64}, {true, true, false, false});
auto rhs_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 512}, {true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, lhs_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, rhs_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, lhs_shape.dimensions(),
{true, false, false, false}),
a_param, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
lhs_shape_dynamic, a_param, size_param, 1));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, rhs_shape.dimensions(), {true, false, false}),
b_param, size_param, 0));
b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
rhs_shape_dynamic, b_param, size_param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(1);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ConvolutionTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
auto zx_shape_dynamic =
ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_shape_dynamic, a_param, size_param, 1));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
  auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      zx_shape_dynamic, a_param, b_param, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, TransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 1}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 1, 0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_2);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_1);
}
TEST_F(DynamicDimensionInferenceTest, NonDescendingTransposeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
auto output_shape = ShapeUtil::MakeShape(F32, {3, 1, 2}, {true, true, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
size_param_1, 0));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
size_param_2, 1));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param_3, 2));
auto* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(output_shape, a_param, {2, 0, 1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_1);
EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_2);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6});
auto output_shape = ShapeUtil::MakeShape(
F32, {6, 4, 1, 5, 2, 3}, {false, true, false, true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, false, false}),
a_param, size_param, 2));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 3));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 2), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 3), size_param);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 4), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 5), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeInferredDimensionTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
auto output_shape =
ShapeUtil::MakeShape(F32, {1, 4, 5}, {true, false, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
  auto* reshape = builder.AddInstruction(HloInstruction::CreateReshape(
      output_shape, a_param, /*inferred_dimension=*/0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeTestMajorDimension) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {32, 10, 4});
auto output_shape = ShapeUtil::MakeShape(F32, {320, 4}, {true, false});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {32, 10, 4}, {true, false, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* reshape = builder.AddInstruction(
HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
absl::Status status = RunInference();
EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReshapeIntoScalar) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1});
auto output_shape = ShapeUtil::MakeShape(F32, {});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {1}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
builder.AddInstruction(HloInstruction::CreateReshape(output_shape, a_param));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_CHECK_OK(RunInference());
}
TEST_F(DynamicDimensionInferenceTest, GatherTest) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[20,10]{1,0} parameter(0)
indices = s32[32,20] parameter(1)
dynamic_size = s32[] parameter(2)
indices_dynamic = s32[<=32,20] set-dimension-size(indices, dynamic_size), dimensions={0}
ROOT gather = s32[<=32,20,10]{2,1,0} gather(%operand, %indices_dynamic),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {}, 0),
module_->entry_computation()->parameter_instruction(2));
}
TEST_F(DynamicDimensionInferenceTest, BroadcastTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2});
auto output_shape =
ShapeUtil::MakeShape(F32, {3, 2, 4}, {false, true, false});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2}, {true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto* broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(output_shape, a_param, {1}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 1), size_param);
EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 2), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, WhileTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto tuple_shape = ShapeUtil::MakeTupleShape({input_shape, input_shape});
auto dynamic_tuple_shape =
ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
auto gte_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 0));
auto gte_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
body_builder.AddInstruction(HloInstruction::CreateTuple({add, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, tuple_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* a_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_0, size_param, 0));
auto* a_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));
a_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_1, size_param, 0));
a_param = builder.AddInstruction(HloInstruction::CreateTuple({a_0, a_1}));
builder.AddInstruction(HloInstruction::CreateWhile(dynamic_tuple_shape,
condition, body, a_param));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
HloInstruction* while_hlo = nullptr;
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kWhile) {
while_hlo = inst;
}
}
ASSERT_NE(while_hlo, nullptr);
EXPECT_EQ(while_hlo->shape().tuple_shapes_size(), 4);
HloInstruction* add_inst = nullptr;
for (HloInstruction* inst : while_hlo->while_body()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_inst = inst;
}
}
EXPECT_NE(add_inst, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_inst, {}, 0), nullptr);
EXPECT_NE(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {0}, 0),
nullptr);
EXPECT_NE(inference_->GetDynamicSize(
module_->entry_computation()->root_instruction(), {1}, 0),
nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ConditionalInputTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
auto tuple_shape_1 = ShapeUtil::MakeTupleShape({input_shape});
auto tuple_shape_2 = ShapeUtil::MakeTupleShape({input_shape, input_shape});
auto tuple_shape_3 =
ShapeUtil::MakeTupleShape({input_shape, input_shape, input_shape});
auto tuple_shape_2_dynamic =
ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});
auto tuple_shape_3_dynamic =
ShapeUtil::MakeTupleShape({input_shape, dynamic_shape, dynamic_shape});
auto true_builder = HloComputation::Builder("true");
{
auto true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_2_dynamic, "param"));
auto gte_0 = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 0));
auto gte_1 = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 1));
auto add = true_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
true_builder.AddInstruction(HloInstruction::CreateTuple({add}));
}
HloComputation* true_branch =
module_->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder("false");
{
auto false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_3_dynamic, "param"));
auto gte_0 = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 1));
auto gte_1 = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 2));
auto add = false_builder.AddInstruction(HloInstruction::CreateBinary(
dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));
false_builder.AddInstruction(HloInstruction::CreateTuple({add}));
}
HloComputation* false_branch =
module_->AddEmbeddedComputation(false_builder.Build());
auto* pred_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(PRED), "pred"));
auto* tuple_2_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, tuple_shape_2, "tuple_2_param"));
auto* tuple_3_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, tuple_shape_3, "tuple_3_param"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
3, scalar_shape_, "size_param"));
auto* param_2_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 0));
param_2_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_2_0, size_param, 0));
auto* param_2_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 1));
param_2_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_2_1, size_param, 0));
tuple_2_param = builder.AddInstruction(
HloInstruction::CreateTuple({param_2_0, param_2_1}));
auto* param_3_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 0));
auto* param_3_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 1));
param_3_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, param_3_1, size_param, 0));
auto* param_3_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 2));
  param_3_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, param_3_2, size_param, 0));
tuple_3_param = builder.AddInstruction(
HloInstruction::CreateTuple({param_3_0, param_3_1, param_3_2}));
builder.AddInstruction(HloInstruction::CreateConditional(
tuple_shape_1, pred_param, tuple_2_param, true_branch, tuple_3_param,
false_branch));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
HloInstruction* conditional_hlo = nullptr;
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kConditional) {
conditional_hlo = inst;
}
}
ASSERT_NE(conditional_hlo, nullptr);
EXPECT_EQ(conditional_hlo->shape().tuple_shapes_size(), 2);
HloInstruction* add_true_branch = nullptr;
for (HloInstruction* inst :
conditional_hlo->true_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_true_branch = inst;
}
}
EXPECT_NE(add_true_branch, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_true_branch, {}, 0), nullptr);
HloInstruction* add_false_branch = nullptr;
for (HloInstruction* inst :
conditional_hlo->false_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kAdd) {
add_false_branch = inst;
}
}
EXPECT_NE(add_false_branch, nullptr);
EXPECT_NE(inference_->GetDynamicSize(add_false_branch, {}, 0), nullptr);
EXPECT_NE(inference_->GetDynamicSize(conditional_hlo, {0}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, ReduceWindowBatchTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto output_shape =
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto* reduce_window =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
output_shape, a_param, init, window, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(reduce_window, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SelectAndScatterTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
auto source_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
auto input_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
auto source_shape_dynamic =
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, scalar_shape_, "size_param"));
auto* source = builder.AddInstruction(HloInstruction::CreateParameter(
2, source_shape, "B"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
input_shape_dynamic, a_param, size_param, 0));
source = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
source_shape_dynamic, source, size_param, 0));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto* sns = builder.AddInstruction(HloInstruction::CreateSelectAndScatter(
input_shape_dynamic, a_param, GetGe(), window, source, init, GetAdd()));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sns, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, ConcatTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param_1"));
auto data_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {5, 8}), "data_param_2"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
data_param_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 8}, {true, false}), data_param_2,
size_param, 0));
auto* concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(F32, {5, 15}, {true, false}),
{data_param, data_param_2}, 1));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(concat, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SliceTest) {
auto builder = HloComputation::Builder(TestName());
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {false, true});
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 1));
  auto* slice = builder.AddInstruction(HloInstruction::CreateSlice(
      dynamic_shape, data_param,
      /*start_indices=*/{0, 0},
      /*limit_indices=*/{5, 7}, /*strides=*/{1, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 1), size_param);
}
TEST_F(DynamicDimensionInferenceTest, DynamicSliceTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
std::vector<HloInstruction*> params;
for (int i = 0; i < 2; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i + 2, ShapeUtil::MakeShape(S32, {}), "slice_indices")));
}
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
  auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
      ShapeUtil::MakeShape(F32, {5, 1}, {true, false}), data_param, params,
      /*slice_sizes=*/{5, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, SortTest) {
auto builder = HloComputation::Builder(TestName());
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto compare_builder = HloComputation::Builder("condition");
compare_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param1"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param2"));
compare_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* compare =
module_->AddEmbeddedComputation(compare_builder.Build());
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 0));
  auto* sort = builder.AddInstruction(
      HloInstruction::CreateSort(dynamic_shape, /*dimension=*/1, {data_param},
                                 compare, /*is_stable=*/false));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sort, {}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, MultiValueSortTest) {
auto builder = HloComputation::Builder(TestName());
auto shape = ShapeUtil::MakeShape(F32, {5, 7});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "data_param"));
auto size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
auto compare_builder = HloComputation::Builder("condition");
compare_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param1"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param2"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "param3"));
compare_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {}), "param4"));
compare_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* compare =
module_->AddEmbeddedComputation(compare_builder.Build());
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 0));
  auto* sort = builder.AddInstruction(HloInstruction::CreateSort(
      ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}),
      /*dimension=*/1, {data_param, data_param}, compare,
      /*is_stable=*/false));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(sort, {0}, 0), size_param);
EXPECT_EQ(inference_->GetDynamicSize(sort, {1}, 0), size_param);
}
TEST_F(DynamicDimensionInferenceTest, DynamicSliceSingleElementTest) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
std::vector<HloInstruction*> params;
for (int i = 0; i < 2; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i + 2, ShapeUtil::MakeShape(S32, {}), "slice_indices")));
}
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
  auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
      ShapeUtil::MakeShape(F32, {1, 1}), data_param, params,
      /*slice_sizes=*/{1, 1}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), nullptr);
}
TEST_F(DynamicDimensionInferenceTest, InfersCustomOp) {
auto builder = HloComputation::Builder(TestName());
auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,
0));
builder.AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1, 1}), {data_param}, "MyCustomOp", ""));
module_->AddEntryComputation(builder.Build());
bool handler_called = false;
auto handler = [&](HloInstruction* hlo,
DynamicDimensionInference* inference) {
CHECK(inference != nullptr);
CHECK(Cast<HloCustomCallInstruction>(hlo) != nullptr);
handler_called = true;
return absl::OkStatus();
};
TF_ASSERT_OK(RunInference(nullptr, handler));
EXPECT_TRUE(handler_called);
}
TEST_F(DynamicDimensionInferenceTest, DynamicReshapeOp) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {9}), "data_input"));
auto six = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
auto dynamic_input =
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9}, {true}), input, six, 0));
auto dynamic_size = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "size_param"));
auto three = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(3)));
auto dynamic_reshape =
builder.AddInstruction(HloInstruction::CreateDynamicReshape(
ShapeUtil::MakeShape(F32, {3, 3}, {false, true}), dynamic_input,
{three, dynamic_size}));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), dynamic_size);
}
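// Equivalent HLO for the graph above (illustrative):
//   dynamic_input = f32[<=9] set-dimension-size(input, six), dimensions={0}
//   result = f32[3,<=3] dynamic-reshape(dynamic_input, three, size_param)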
TEST_F(DynamicDimensionInferenceTest, ReshapeOpWithMultipleDynamicDimensions) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {9, 2}), "data_input"));
auto six = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9, 2}, {true, false}), input, six, 0));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
ShapeUtil::MakeShape(F32, {9, 2}, {true, true}), input, one, 1));
auto dynamic_reshape = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {9, 1, 2}, {true, false, true}), input));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunInference());
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), six);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), nullptr);
EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 2), one);
}
TEST_F(DynamicDimensionInferenceTest, HandleMapInDynamicDimensionInference) {
const char* module_str = R"(
HloModule test_module
%scatter-combiner.285 (p0.286: c128[], p1.287: c128[]) -> c128[] {
%p0.286 = c128[] parameter(0)
%p1.287 = c128[] parameter(1)
ROOT %add.288 = c128[] add(c128[] %p0.286, c128[] %p1.287)
}
%while_body {
%reshape.8 = s32[] parameter(4)
%reshape.7 = c128[1]{0} parameter(3)
%reduce = pred[] parameter(2)
%concatenate = s32[1]{0} parameter(1)
  %slice.4 = s32[1]{0} slice(s32[1]{0} %concatenate), slice={[0:1]}
%broadcast.7 = pred[1]{0} broadcast(pred[] %reduce), dimensions={}
%param.1 = (s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) parameter(0)
%get-tuple-element.2 = c128[<=1]{0} get-tuple-element((s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) %param.1), index=1
%dynamic-slice.2 = c128[1]{0} dynamic-slice(c128[<=1]{0} %get-tuple-element.2,s32[] %reshape.8), dynamic_slice_sizes={1}
%map = c128[1]{0} map(c128[1]{0} %dynamic-slice.2,c128[1]{0} %reshape.7), dimensions={0}, to_apply=%scatter-combiner.285
%select = c128[1]{0} select(pred[1]{0} %broadcast.7,c128[1]{0} %map,c128[1]{0} %dynamic-slice.2)
%reshape.9 = s32[] reshape(s32[1]{0} %slice.4)
%dynamic-update-slice = c128[<=1]{0} dynamic-update-slice(c128[<=1]{0} %get-tuple-element.2,c128[1]{0} %select,s32[] %reshape.9)
})";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(RunInference());
}
TEST_F(DynamicDimensionInferenceTest, RuntimeShapeCheck) {
const char* hlo = R"(
HloModule module
ENTRY computation {
a = f32[20,20] parameter(0)
a_size_1 = s32[] parameter(1)
a_size_2 = s32[] parameter(2)
a_dynamic_1 = f32[<=20,20] set-dimension-size(a, a_size_1), dimensions={0}
a_dynamic_2 = f32[<=20,<=20] set-dimension-size(a_dynamic_1, a_size_2), dimensions={1}
b = f32[20,20] parameter(3)
b_size_1 = s32[] parameter(4)
b_size_2 = s32[] parameter(5)
b_dynamic_1 = f32[<=20,20] set-dimension-size(b, b_size_1), dimensions={0}
b_dynamic_2 = f32[<=20,<=20] set-dimension-size(b_dynamic_1, b_size_2), dimensions={1}
ROOT f = add(a_dynamic_2, b_dynamic_2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
  TF_ASSERT_OK(RunInference(
      /*op_supports_dynamism_handler=*/nullptr,
      /*handler=*/nullptr, DynamicDimensionInference::ShapeCheckMode::kRuntime,
      [&](HloInstruction* constraint) {
        constraint->parent()->AddInstruction(HloInstruction::CreateCustomCall(
            ShapeUtil::MakeTokenShape(), {constraint},
            /*custom_call_target=*/"__xla__assert",
            /*opaque=*/std::string{}, API_VERSION_STATUS_RETURNING));
      }));
absl::StatusOr<bool> filecheck_result = RunFileCheck(module_->ToString({}),
R"(
)");
TF_ASSERT_OK(filecheck_result.status());
EXPECT_TRUE(*filecheck_result);
}
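// Conceptually, the kRuntime mode exercised above compares the matching
// dynamic sizes of the two add operands, ANDs the comparisons together, and
// feeds the result to the "__xla__assert" custom call emitted by the
// assertion generator (sketch; exact instruction names may differ, and the
// FileCheck pattern above does not pin them down).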
TEST_F(DynamicDimensionInferenceTest, NestedControlFlow) {
const char* hlo = R"(
HloModule tfcompile.377, entry_computation_layout={(s32[], f32[250]{0}, pred[], pred[], s32[], pred[], s32[], pred[])->(f32[3]{0})}
cond_2_Sum-reduction.17 {
x.18 = f32[] parameter(0)
y.19 = f32[] parameter(1)
ROOT add.20 = f32[] add(x.18, y.19)
}
cond_2_cond_true_214__.21 {
arg_tuple.22 = () parameter(0)
constant.23 = s32[] constant(1)
reshape.24 = s32[] reshape(constant.23)
ROOT tuple.25 = (s32[]) tuple(constant.23)
}
cond_2_cond_false_215__.26 {
arg_tuple.27 = () parameter(0)
constant.28 = s32[] constant(0)
reshape.29 = s32[] reshape(constant.28)
ROOT tuple.30 = (s32[]) tuple(constant.28)
}
cond_2_true_195__.31 {
arg_tuple.32 = (s32[], f32[250]{0}) parameter(0)
get-tuple-element.33 = s32[] get-tuple-element(arg_tuple.32), index=0
constant.35 = s32[] constant(20)
minimum.36 = s32[] minimum(get-tuple-element.33, constant.35)
reshape.37 = s32[1]{0} reshape(minimum.36)
concatenate.38 = s32[1]{0} concatenate(reshape.37), dimensions={0}
slice.48 = s32[1]{0} slice(concatenate.38), slice={[0:1]}
reshape.49 = s32[] reshape(reshape.37)
constant.43 = s32[] constant(0)
compare.50 = pred[] compare(minimum.36, constant.43), direction=LT
constant.44 = s32[] constant(250)
add.51 = s32[] add(constant.44, minimum.36)
select.52 = s32[] select(compare.50, add.51, minimum.36)
constant.45 = s32[1]{0} constant({0})
slice.46 = s32[1]{0} slice(constant.45), slice={[0:1]}
reshape.47 = s32[] reshape(slice.46)
subtract.53 = s32[] subtract(select.52, reshape.47)
maximum.54 = s32[] maximum(subtract.53, constant.43)
convert.55 = s32[] convert(maximum.54)
get-tuple-element.34 = f32[250]{0} get-tuple-element(arg_tuple.32), index=1
constant.39 = f32[] constant(0)
pad.40 = f32[500]{0} pad(get-tuple-element.34, constant.39), padding=0_250
constant.41 = s32[] constant(500)
set-dimension-size.42 = f32[500]{0} set-dimension-size(pad.40, constant.41), dimensions={0}
dynamic-slice.56 = f32[250]{0} dynamic-slice(set-dimension-size.42, reshape.47), dynamic_slice_sizes={250}
reshape.57 = f32[250]{0} reshape(dynamic-slice.56)
set-dimension-size.58 = f32[<=250]{0} set-dimension-size(dynamic-slice.56, maximum.54), dimensions={0}
constant.59 = f32[] constant(1)
broadcast.60 = f32[250]{0} broadcast(constant.59), dimensions={}
compare.61 = pred[<=250]{0} compare(set-dimension-size.58, broadcast.60), direction=GE
convert.62 = f32[<=250]{0} convert(compare.61)
convert.63 = f32[<=250]{0} convert(convert.62)
constant.64 = f32[] constant(0)
convert.65 = f32[] convert(constant.64)
reduce.66 = f32[] reduce(convert.62, constant.64), dimensions={0}, to_apply=cond_2_Sum-reduction.17
convert.67 = f32[] convert(reduce.66)
reshape.73 = f32[] reshape(reduce.66)
constant.68 = f32[] constant(6)
compare.69 = pred[] compare(reduce.66, constant.68), direction=GE
tuple.70 = () tuple()
conditional.71 = (s32[]) conditional(compare.69, tuple.70, tuple.70), true_computation=cond_2_cond_true_214__.21, false_computation=cond_2_cond_false_215__.26
get-tuple-element.72 = s32[] get-tuple-element(conditional.71), index=0
reshape.74 = s32[] reshape(get-tuple-element.72)
ROOT tuple.75 = (f32[], s32[]) tuple(reduce.66, get-tuple-element.72)
}
cond_2_false_196__.76 {
arg_tuple.77 = (s32[], f32[250]{0}) parameter(0)
constant.80 = f32[] constant(0)
reshape.82 = f32[] reshape(constant.80)
constant.81 = s32[] constant(0)
reshape.83 = s32[] reshape(constant.81)
ROOT tuple.84 = (f32[], s32[]) tuple(constant.80, constant.81)
}
cond_true_10__.85 {
arg_tuple.86 = (pred[], pred[], pred[]) parameter(0)
get-tuple-element.87 = pred[] get-tuple-element(arg_tuple.86), index=0
reshape.90 = pred[] reshape(get-tuple-element.87)
ROOT tuple.91 = (pred[]) tuple(get-tuple-element.87)
}
cond_cond_true_16__.92 {
arg_tuple.93 = (pred[], pred[]) parameter(0)
get-tuple-element.94 = pred[] get-tuple-element(arg_tuple.93), index=0
reshape.96 = pred[] reshape(get-tuple-element.94)
ROOT tuple.97 = (pred[]) tuple(get-tuple-element.94)
}
cond_cond_false_17__.98 {
arg_tuple.99 = (pred[], pred[]) parameter(0)
get-tuple-element.101 = pred[] get-tuple-element(arg_tuple.99), index=1
reshape.102 = pred[] reshape(get-tuple-element.101)
ROOT tuple.103 = (pred[]) tuple(get-tuple-element.101)
}
cond_false_11__.104 {
arg_tuple.105 = (pred[], pred[], pred[]) parameter(0)
get-tuple-element.107 = pred[] get-tuple-element(arg_tuple.105), index=1
get-tuple-element.108 = pred[] get-tuple-element(arg_tuple.105), index=2
tuple.109 = (pred[], pred[]) tuple(get-tuple-element.107, get-tuple-element.108)
conditional.110 = (pred[]) conditional(get-tuple-element.107, tuple.109, tuple.109), true_computation=cond_cond_true_16__.92, false_computation=cond_cond_false_17__.98
get-tuple-element.111 = pred[] get-tuple-element(conditional.110), index=0
reshape.112 = pred[] reshape(get-tuple-element.111)
ROOT tuple.113 = (pred[]) tuple(get-tuple-element.111)
}
cond_1_map_while_cond_true_82__.114 {
arg_tuple.115 = (f32[]) parameter(0)
constant.117 = f32[] constant(0)
reshape.118 = f32[] reshape(constant.117)
ROOT tuple.119 = (f32[]) tuple(constant.117)
}
cond_1_map_while_cond_cond_true_91__.120 {
constant.123 = f32[] constant(0.1)
arg_tuple.121 = (f32[]) parameter(0)
get-tuple-element.122 = f32[] get-tuple-element(arg_tuple.121), index=0
multiply.124 = f32[] multiply(constant.123, get-tuple-element.122)
constant.125 = f32[] constant(0)
add.126 = f32[] add(multiply.124, constant.125)
constant.127 = f32[] constant(0.9)
divide.128 = f32[] divide(add.126, constant.127)
reshape.129 = f32[] reshape(divide.128)
ROOT tuple.130 = (f32[]) tuple(divide.128)
}
cond_1_map_while_cond_cond_cond_true_106__.131 {
constant.134 = f32[] constant(0.8)
arg_tuple.132 = (f32[]) parameter(0)
get-tuple-element.133 = f32[] get-tuple-element(arg_tuple.132), index=0
multiply.135 = f32[] multiply(constant.134, get-tuple-element.133)
constant.136 = f32[] constant(-0.711)
add.137 = f32[] add(multiply.135, constant.136)
constant.138 = f32[] constant(0.09)
divide.139 = f32[] divide(add.137, constant.138)
reshape.140 = f32[] reshape(divide.139)
ROOT tuple.141 = (f32[]) tuple(divide.139)
}
cond_1_map_while_cond_cond_cond_cond_true_121__.142 {
constant.145 = f32[] constant(0.2)
arg_tuple.143 = (f32[]) parameter(0)
get-tuple-element.144 = f32[] get-tuple-element(arg_tuple.143), index=0
multiply.146 = f32[] multiply(constant.145, get-tuple-element.144)
constant.147 = f32[] constant(-0.18)
add.148 = f32[] add(multiply.146, constant.147)
constant.149 = f32[] constant(0.02)
divide.150 = f32[] divide(add.148, constant.149)
reshape.151 = f32[] reshape(divide.150)
ROOT tuple.152 = (f32[]) tuple(divide.150)
}
cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153 {
constant.156 = f32[] constant(0.1)
arg_tuple.154 = (f32[]) parameter(0)
get-tuple-element.155 = f32[] get-tuple-element(arg_tuple.154), index=0
multiply.157 = f32[] multiply(constant.156, get-tuple-element.155)
constant.158 = f32[] constant(108.788)
add.159 = f32[] add(multiply.157, constant.158)
constant.160 = f32[] constant(98.99)
divide.161 = f32[] divide(add.159, constant.160)
reshape.162 = f32[] reshape(divide.161)
ROOT tuple.163 = (f32[]) tuple(divide.161)
}
cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164 {
arg_tuple.165 = (f32[]) parameter(0)
constant.167 = f32[] constant(1.2)
reshape.168 = f32[] reshape(constant.167)
ROOT tuple.169 = (f32[]) tuple(constant.167)
}
cond_1_map_while_cond_cond_cond_cond_false_122__.170 {
arg_tuple.171 = (f32[]) parameter(0)
get-tuple-element.172 = f32[] get-tuple-element(arg_tuple.171), index=0
constant.173 = f32[] constant(100)
compare.174 = pred[] compare(get-tuple-element.172, constant.173), direction=LE
tuple.175 = (f32[]) tuple(get-tuple-element.172)
conditional.176 = (f32[]) conditional(compare.174, tuple.175, tuple.175), true_computation=cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153, false_computation=cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164
get-tuple-element.177 = f32[] get-tuple-element(conditional.176), index=0
reshape.178 = f32[] reshape(get-tuple-element.177)
ROOT tuple.179 = (f32[]) tuple(get-tuple-element.177)
}
cond_1_map_while_cond_cond_cond_false_107__.180 {
arg_tuple.181 = (f32[]) parameter(0)
get-tuple-element.182 = f32[] get-tuple-element(arg_tuple.181), index=0
constant.183 = f32[] constant(1.01)
compare.184 = pred[] compare(get-tuple-element.182, constant.183), direction=LE
tuple.185 = (f32[]) tuple(get-tuple-element.182)
conditional.186 = (f32[]) conditional(compare.184, tuple.185, tuple.185), true_computation=cond_1_map_while_cond_cond_cond_cond_true_121__.142, false_computation=cond_1_map_while_cond_cond_cond_cond_false_122__.170
get-tuple-element.187 = f32[] get-tuple-element(conditional.186), index=0
reshape.188 = f32[] reshape(get-tuple-element.187)
ROOT tuple.189 = (f32[]) tuple(get-tuple-element.187)
}
cond_1_map_while_cond_cond_false_92__.190 {
arg_tuple.191 = (f32[]) parameter(0)
get-tuple-element.192 = f32[] get-tuple-element(arg_tuple.191), index=0
constant.193 = f32[] constant(0.99)
compare.194 = pred[] compare(get-tuple-element.192, constant.193), direction=LE
tuple.195 = (f32[]) tuple(get-tuple-element.192)
conditional.196 = (f32[]) conditional(compare.194, tuple.195, tuple.195), true_computation=cond_1_map_while_cond_cond_cond_true_106__.131, false_computation=cond_1_map_while_cond_cond_cond_false_107__.180
get-tuple-element.197 = f32[] get-tuple-element(conditional.196), index=0
reshape.198 = f32[] reshape(get-tuple-element.197)
ROOT tuple.199 = (f32[]) tuple(get-tuple-element.197)
}
cond_1_map_while_cond_false_83__.200 {
arg_tuple.201 = (f32[]) parameter(0)
get-tuple-element.202 = f32[] get-tuple-element(arg_tuple.201), index=0
constant.203 = f32[] constant(0.9)
compare.204 = pred[] compare(get-tuple-element.202, constant.203), direction=LE
tuple.205 = (f32[]) tuple(get-tuple-element.202)
conditional.206 = (f32[]) conditional(compare.204, tuple.205, tuple.205), true_computation=cond_1_map_while_cond_cond_true_91__.120, false_computation=cond_1_map_while_cond_cond_false_92__.190
get-tuple-element.207 = f32[] get-tuple-element(conditional.206), index=0
reshape.208 = f32[] reshape(get-tuple-element.207)
ROOT tuple.209 = (f32[]) tuple(get-tuple-element.207)
}
cond_1_map_while_body_59__.210 {
arg_tuple.211 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)
get-tuple-element.212 = s32[] get-tuple-element(arg_tuple.211), index=0
constant.218 = s32[] constant(1)
add.219 = s32[] add(get-tuple-element.212, constant.218)
reshape.239 = s32[] reshape(add.219)
get-tuple-element.213 = s32[] get-tuple-element(arg_tuple.211), index=1
reshape.240 = s32[] reshape(get-tuple-element.213)
get-tuple-element.214 = s32[] get-tuple-element(arg_tuple.211), index=2
constant.220 = s32[] constant(1)
add.221 = s32[] add(get-tuple-element.214, constant.220)
reshape.241 = s32[] reshape(add.221)
get-tuple-element.216 = s32[] get-tuple-element(arg_tuple.211), index=4
reshape.242 = s32[] reshape(get-tuple-element.216)
get-tuple-element.215 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=3
get-tuple-element.235 = f32[<=250]{0} get-tuple-element(get-tuple-element.215), index=0
get-tuple-element.217 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=5
get-tuple-element.223 = f32[<=250]{0} get-tuple-element(get-tuple-element.217), index=0
dynamic-slice.224 = f32[1]{0} dynamic-slice(get-tuple-element.223, get-tuple-element.214), dynamic_slice_sizes={1}
reshape.225 = f32[] reshape(dynamic-slice.224)
constant.226 = f32[] constant(0)
compare.227 = pred[] compare(reshape.225, constant.226), direction=LE
tuple.228 = (f32[]) tuple(reshape.225)
conditional.229 = (f32[]) conditional(compare.227, tuple.228, tuple.228), true_computation=cond_1_map_while_cond_true_82__.114, false_computation=cond_1_map_while_cond_false_83__.200
get-tuple-element.230 = f32[] get-tuple-element(conditional.229), index=0
reshape.233 = f32[1]{0} reshape(get-tuple-element.230)
dynamic-update-slice.236 = f32[<=250]{0} dynamic-update-slice(get-tuple-element.235, reshape.233, get-tuple-element.214)
get-tuple-element.237 = s32[] get-tuple-element(get-tuple-element.215), index=1
tuple.238 = (f32[<=250]{0}, s32[]) tuple(dynamic-update-slice.236, get-tuple-element.237)
ROOT tuple.243 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(add.219, get-tuple-element.213, add.221, tuple.238, get-tuple-element.216, get-tuple-element.217)
}
cond_wrapper.257 {
inputs.258 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)
get-tuple-element.0 = s32[] get-tuple-element(inputs.258), index=0
get-tuple-element.1 = s32[] get-tuple-element(inputs.258), index=1
compare.0 = pred[] compare(get-tuple-element.0, get-tuple-element.1), direction=LT
get-tuple-element.2 = s32[] get-tuple-element(inputs.258), index=2
get-tuple-element.3 = s32[] get-tuple-element(inputs.258), index=4
compare.1 = pred[] compare(get-tuple-element.2, get-tuple-element.3), direction=LT
and.0 = pred[] and(compare.0, compare.1)
tuple.0 = (pred[]) tuple(and.0)
ROOT get-tuple-element.260 = pred[] get-tuple-element(tuple.0), index=0
reshape.0 = pred[] reshape(and.0)
}
cond_1_Sum-reduction.261 {
x.262 = f32[] parameter(0)
y.263 = f32[] parameter(1)
ROOT add.264 = f32[] add(x.262, y.263)
}
cond_1_true_36__.265 {
arg_tuple.266 = (s32[], f32[250]{0}) parameter(0)
get-tuple-element.267 = s32[] get-tuple-element(arg_tuple.266), index=0
reshape.269 = s32[1]{0} reshape(get-tuple-element.267)
concatenate.270 = s32[1]{0} concatenate(reshape.269), dimensions={0}
slice.280 = s32[1]{0} slice(concatenate.270), slice={[0:1]}
reshape.281 = s32[] reshape(reshape.269)
constant.275 = s32[] constant(0)
compare.282 = pred[] compare(get-tuple-element.267, constant.275), direction=LT
constant.276 = s32[] constant(250)
add.283 = s32[] add(constant.276, get-tuple-element.267)
select.284 = s32[] select(compare.282, add.283, get-tuple-element.267)
constant.277 = s32[1]{0} constant({0})
slice.278 = s32[1]{0} slice(constant.277), slice={[0:1]}
reshape.279 = s32[] reshape(slice.278)
subtract.285 = s32[] subtract(select.284, reshape.279)
maximum.286 = s32[] maximum(subtract.285, constant.275)
convert.287 = s32[] convert(maximum.286)
get-tuple-element.268 = f32[250]{0} get-tuple-element(arg_tuple.266), index=1
constant.271 = f32[] constant(0)
pad.272 = f32[500]{0} pad(get-tuple-element.268, constant.271), padding=0_250
constant.273 = s32[] constant(500)
set-dimension-size.274 = f32[500]{0} set-dimension-size(pad.272, constant.273), dimensions={0}
dynamic-slice.288 = f32[250]{0} dynamic-slice(set-dimension-size.274, reshape.279), dynamic_slice_sizes={250}
reshape.289 = f32[250]{0} reshape(dynamic-slice.288)
set-dimension-size.290 = f32[<=250]{0} set-dimension-size(dynamic-slice.288, maximum.286), dimensions={0}
get-dimension-size.291 = s32[] get-dimension-size(set-dimension-size.290), dimensions={0}
convert.292 = s32[] convert(get-dimension-size.291)
broadcast.293 = s32[1]{0} broadcast(get-dimension-size.291), dimensions={}
concatenate.294 = s32[1]{0} concatenate(broadcast.293), dimensions={0}
slice.295 = s32[1]{0} slice(concatenate.294), slice={[0:1]}
reshape.296 = s32[] reshape(broadcast.293)
constant.309 = s32[] constant(0)
constant.310 = s32[] constant(0)
constant.312 = f32[] constant(0)
broadcast.313 = f32[250]{0} broadcast(constant.312), dimensions={}
constant.302 = s32[] constant(0)
broadcast.303 = s32[250]{0} broadcast(constant.302), dimensions={}
set-dimension-size.304 = s32[<=250]{0} set-dimension-size(broadcast.303, get-dimension-size.291), dimensions={0}
get-dimension-size.311 = s32[] get-dimension-size(set-dimension-size.304), dimensions={0}
set-dimension-size.314 = f32[<=250]{0} set-dimension-size(broadcast.313, get-dimension-size.311), dimensions={0}
constant.315 = s32[] constant(0)
tuple.316 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.314, constant.315)
constant.305 = s32[] constant(250)
tuple.306 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.290, constant.305)
tuple.317 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(constant.309, get-dimension-size.291, constant.310, tuple.316, get-dimension-size.291, tuple.306)
while.318 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) while(tuple.317), condition=cond_wrapper.257, body=cond_1_map_while_body_59__.210
get-tuple-element.319 = s32[] get-tuple-element(while.318), index=0
get-tuple-element.320 = s32[] get-tuple-element(while.318), index=1
get-tuple-element.321 = s32[] get-tuple-element(while.318), index=2
get-tuple-element.322 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=3
get-tuple-element.323 = s32[] get-tuple-element(while.318), index=4
get-tuple-element.324 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=5
tuple.325 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(get-tuple-element.319, get-tuple-element.320, get-tuple-element.321, get-tuple-element.322, get-tuple-element.323, get-tuple-element.324)
get-tuple-element.329 = (f32[<=250]{0}, s32[]) get-tuple-element(tuple.325), index=3
get-tuple-element.332 = f32[<=250]{0} get-tuple-element(get-tuple-element.329), index=0
convert.333 = f32[<=250]{0} convert(get-tuple-element.332)
constant.334 = f32[] constant(0)
convert.335 = f32[] convert(constant.334)
reduce.336 = f32[] reduce(get-tuple-element.332, constant.334), dimensions={0}, to_apply=cond_1_Sum-reduction.261
convert.337 = f32[] convert(reduce.336)
reshape.338 = f32[] reshape(reduce.336)
ROOT tuple.339 = (f32[]) tuple(reduce.336)
}
cond_1_false_37__.340 {
arg_tuple.341 = (s32[], f32[250]{0}) parameter(0)
constant.344 = f32[] constant(0)
reshape.345 = f32[] reshape(constant.344)
ROOT tuple.346 = (f32[]) tuple(constant.344)
}
ENTRY tfcompile.377 {
arg6.7 = s32[] parameter(6), parameter_replication={false}
arg0.1 = s32[] parameter(0), parameter_replication={false}
reshape.9 = s32[] reshape(arg0.1)
arg1.2 = f32[250]{0} parameter(1), parameter_replication={false}
reshape.10 = f32[250]{0} reshape(arg1.2)
arg2.3 = pred[] parameter(2), parameter_replication={false}
reshape.11 = pred[] reshape(arg2.3)
arg3.4 = pred[] parameter(3), parameter_replication={false}
reshape.12 = pred[] reshape(arg3.4)
arg4.5 = s32[] parameter(4), parameter_replication={false}
reshape.13 = s32[] reshape(arg4.5)
arg5.6 = pred[] parameter(5), parameter_replication={false}
reshape.14 = pred[] reshape(arg5.6)
arg7.8 = pred[] parameter(7), parameter_replication={false}
reshape.16 = pred[] reshape(arg7.8)
tuple.1 = (s32[], f32[250]{0}) tuple(arg0.1, arg1.2)
conditional.0 = (f32[], s32[]) conditional(arg2.3, tuple.1, tuple.1), true_computation=cond_2_true_195__.31, false_computation=cond_2_false_196__.76
get-tuple-element.4 = f32[] get-tuple-element(conditional.0), index=0
reshape.1 = f32[1]{0} reshape(get-tuple-element.4)
get-tuple-element.5 = s32[] get-tuple-element(conditional.0), index=1
convert.0 = f32[] convert(get-tuple-element.5)
reshape.2 = f32[1]{0} reshape(convert.0)
tuple.2 = (pred[], pred[], pred[]) tuple(arg3.4, arg5.6, arg7.8)
conditional.1 = (pred[]) conditional(arg3.4, tuple.2, tuple.2), true_computation=cond_true_10__.85, false_computation=cond_false_11__.104
get-tuple-element.6 = pred[] get-tuple-element(conditional.1), index=0
tuple.3 = (s32[], f32[250]{0}) tuple(arg4.5, arg1.2)
conditional.2 = (f32[]) conditional(get-tuple-element.6, tuple.3, tuple.3), true_computation=cond_1_true_36__.265, false_computation=cond_1_false_37__.340
get-tuple-element.7 = f32[] get-tuple-element(conditional.2), index=0
reshape.3 = f32[1]{0} reshape(get-tuple-element.7)
concatenate.0 = f32[3]{0} concatenate(reshape.1, reshape.2, reshape.3), dimensions={0}
tuple.4 = (f32[3]{0}) tuple(concatenate.0)
get-tuple-element.374 = f32[3]{0} get-tuple-element(tuple.4), index=0
reshape.375 = f32[3]{0} reshape(get-tuple-element.374)
ROOT tuple.376 = (f32[3]{0}) tuple(get-tuple-element.374)
reshape.4 = f32[3]{0} reshape(concatenate.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK(RunInference());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79bd5411-3f18-46a2-ad61-ae5728c586c1 | cpp | tensorflow/tensorflow | call_inliner | third_party/xla/xla/service/call_inliner.cc | third_party/xla/xla/service/call_inliner_test.cc | #include "xla/service/call_inliner.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_domain_isolator.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
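// Visits the callee computation of a kCall in post order and clones each
// instruction into the caller, keeping a map from callee instructions to
// their clones so that operands and control dependencies can be rewired.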
class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
public:
explicit SubcomputationInsertionVisitor(HloInstruction* call)
: call_(call), outer_(call->parent()) {
CHECK_EQ(HloOpcode::kCall, call_->opcode());
}
absl::Status DefaultAction(HloInstruction* hlo) override {
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_operand, Resolve(operand));
new_operands.push_back(new_operand);
}
VLOG(1) << "Cloning HLO and adding to caller: " << hlo->ToString();
auto new_hlo = hlo->CloneWithNewOperands(hlo->shape(), new_operands);
HloInstruction* new_hlo_pointer =
outer_->AddInstruction(std::move(new_hlo));
TF_RETURN_IF_ERROR(NoteMapping(hlo, new_hlo_pointer));
for (HloInstruction* control_predecessor : hlo->control_predecessors()) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_control_predecessor,
Resolve(control_predecessor));
TF_RETURN_IF_ERROR(
new_control_predecessor->AddControlDependencyTo(new_hlo_pointer));
}
return absl::OkStatus();
}
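  // Parameters are not cloned; they resolve directly to the corresponding
  // operand of the call being inlined.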
absl::Status HandleParameter(HloInstruction* parameter) override {
TF_RETURN_IF_ERROR(NoteMapping(
parameter, call_->mutable_operand(parameter->parameter_number())));
return absl::OkStatus();
}
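  // After the whole callee has been cloned, the call instruction itself is
  // replaced by the clone of the callee's root.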
absl::Status FinishVisit(HloInstruction* root) override {
TF_ASSIGN_OR_RETURN(HloInstruction * new_root, Resolve(root));
VLOG(1) << "Replacing all uses of " << call_->ToString()
<< " with new root " << new_root->ToString();
return outer_->ReplaceInstruction(call_, new_root);
}
CallInliner::InlinedInstructionMap ConsumeInstructionMap() {
return std::move(subcomputation_hlo_to_new_hlo_);
}
private:
absl::StatusOr<HloInstruction*> Resolve(HloInstruction* subcomputation_hlo) {
auto it = subcomputation_hlo_to_new_hlo_.find(subcomputation_hlo);
if (it == subcomputation_hlo_to_new_hlo_.end()) {
return NotFound(
"Could not find mapping from subcomputation HLO %s to a cloned HLO.",
subcomputation_hlo->ToString());
}
return it->second;
}
absl::Status NoteMapping(HloInstruction* subcomputation_hlo,
HloInstruction* new_hlo) {
auto result = subcomputation_hlo_to_new_hlo_.insert(
std::make_pair(subcomputation_hlo, new_hlo));
TF_RET_CHECK(result.second)
<< "A mapping for the subcomputation HLO is already present.";
return absl::OkStatus();
}
HloInstruction* call_;
HloComputation* outer_;
CallInliner::InlinedInstructionMap subcomputation_hlo_to_new_hlo_;
};
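// Under the Shardy partitioner, shard_map / manual-computation bodies
// (recognized by their computation names) are expected to stay outlined
// until SPMD partitioning, so they are excluded from inlining.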
bool InlineUnderShardy(HloInstruction* instruction) {
return !(instruction->GetModule()->config().use_shardy_partitioner() &&
(absl::StrContains(instruction->to_apply()->name(), "shmap_body") ||
absl::StartsWith(instruction->to_apply()->name(),
sdy::kManualComputationBodyFuncName.str())));
}
}
absl::StatusOr<CallInliner::InlinedInstructionMap>
CallInliner::Inline(HloInstruction* call) {
TF_RET_CHECK(call->opcode() == HloOpcode::kCall)
<< "Instruction was not a call op: " << call->opcode();
if (call->is_composite()) {
FrontendAttributes frontend_attributes = call->frontend_attributes();
frontend_attributes.mutable_map()->erase("composite.name");
frontend_attributes.mutable_map()->erase("composite.attributes");
frontend_attributes.mutable_map()->erase("composite.version");
call->set_frontend_attributes(frontend_attributes);
}
const auto& callees = call->called_computations();
TF_RET_CHECK(callees.size() == 1);
HloComputation* callee = callees[0];
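  // Propagate MUST_FUSE / MAXIMAL_FUSE hints from the call onto every
  // fusible instruction of the callee so the hints survive inlining.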
if (call->has_frontend_attributes()) {
const FrontendAttributes& call_attributes = call->frontend_attributes();
std::string has_fuse =
call_attributes.map().contains("MUST_FUSE") ? "MUST_FUSE"
: call_attributes.map().contains("MAXIMAL_FUSE") ? "MAXIMAL_FUSE"
: "";
if (!has_fuse.empty()) {
for (auto instruction : callee->instructions()) {
if (instruction->IsFusible()) {
FrontendAttributes frontend_attributes =
instruction->frontend_attributes();
frontend_attributes.mutable_map()->insert(
{has_fuse, call_attributes.map().at(has_fuse)});
instruction->set_frontend_attributes(frontend_attributes);
}
}
}
}
SubcomputationInsertionVisitor visitor(call);
TF_RETURN_IF_ERROR(callee->Accept(&visitor));
return visitor.ConsumeInstructionMap();
}
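// Calls that carry a backend config, live inside an async computation, or
// wrap a Shardy manual-computation body are left alone.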
bool CallInliner::IsInlineableCallOp(HloInstruction* instruction) const {
return instruction->opcode() == HloOpcode::kCall &&
!instruction->has_backend_config() &&
!instruction->parent()->IsAsyncComputation() &&
InlineUnderShardy(instruction);
}
absl::StatusOr<bool> CallInliner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
bool did_mutate = false;
TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node)
-> absl::Status {
if (!HloInstruction::IsThreadIncluded(
node.computation()->execution_thread(), execution_threads)) {
return absl::OkStatus();
}
VLOG(1) << "Visiting node: " << node.ToString();
for (HloInstruction* instruction :
node.computation()->MakeInstructionPostOrder()) {
if (IsInlineableCallOp(instruction)) {
const auto& callees = instruction->called_computations();
TF_RET_CHECK(callees.size() == 1);
if (!single_call_site_ || call_graph->GetNode(instruction->to_apply())
.caller_callsites()
.size() == 1) {
TF_ASSIGN_OR_RETURN(CallInliner::InlinedInstructionMap inline_map,
Inline(instruction));
if (update_domain_) {
HloDomainIsolator isolator(
[]() { return ShardingDomainCreator{}; });
for (const auto& [call_inst, inlined_inst] : inline_map) {
TF_RETURN_IF_ERROR(isolator.UpdateDomains(inlined_inst).status());
}
}
did_mutate = true;
}
}
}
return absl::OkStatus();
}));
if (did_mutate) {
TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
}
return did_mutate;
}
} | #include "xla/service/call_inliner.h"
#include <cstdint>
#include <string>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ::testing::ElementsAre;

using CallInlinerTest = HloTestBase;
TEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {
HloComputation::Builder inner(TestName() + ".inner");
HloInstruction* zero = inner.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(24.0f)));
HloInstruction* one = inner.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
TF_ASSERT_OK(zero->AddControlDependencyTo(one));
auto module = CreateNewVerifiedModule();
HloComputation* inner_computation =
module->AddEmbeddedComputation(inner.Build());
HloComputation::Builder outer(TestName() + ".outer");
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
outer.AddInstruction(
HloInstruction::CreateCall(r0f32, {}, inner_computation));
auto computation = module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(computation->root_instruction(), op::Constant());
EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
42);
ASSERT_EQ(1, computation->root_instruction()->control_predecessors().size());
auto prior = computation->root_instruction()->control_predecessors()[0];
EXPECT_THAT(prior, op::Constant());
EXPECT_EQ(prior->literal().GetFirstElement<float>(), 24);
}
TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder just_false(TestName() + ".false");
just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
HloComputation::Builder call_false_builder(TestName() + ".call_false");
call_false_builder.AddInstruction(
HloInstruction::CreateParameter(0, pred, "param"));
call_false_builder.AddInstruction(
HloInstruction::CreateCall(pred, {}, false_computation));
HloComputation* call_false =
module->AddEmbeddedComputation(call_false_builder.Build());
HloComputation::Builder outer(TestName() + ".outer");
HloInstruction* init_value = outer.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
outer.AddInstruction(
HloInstruction::CreateWhile(pred, call_false, call_false, init_value));
auto computation = module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(
computation->root_instruction()->while_condition()->root_instruction(),
op::Constant());
EXPECT_THAT(computation->root_instruction()->while_body()->root_instruction(),
op::Constant());
}
TEST_F(CallInlinerTest, InlineWithoutRunningPass) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder just_false(TestName() + ".false");
auto* true_constant = just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<bool>({true})));
auto* false_constant = just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
HloComputation::Builder call_false_builder(TestName() + ".call_false");
HloInstruction* call = call_false_builder.AddInstruction(
HloInstruction::CreateCall(pred, {}, false_computation));
auto computation = module->AddEntryComputation(call_false_builder.Build());
TF_ASSERT_OK(CallInliner::Inline(call).status());
EXPECT_THAT(computation->root_instruction(), op::Constant());
EXPECT_THAT(computation->root_instruction()->control_successors(),
ElementsAre(op::Constant()));
}
TEST_F(CallInlinerTest, InlineWithEmptyComputation) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
HloComputation::Builder empty(TestName() + ".empty");
empty.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
empty.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
HloComputation* empty_computation =
module->AddEmbeddedComputation(empty.Build());
HloComputation::Builder empty2(TestName() + ".empty");
empty2.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
empty2.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
HloComputation* empty2_computation =
module->AddEmbeddedComputation(empty2.Build());
HloComputation::Builder entry("entry");
auto zero = entry.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {zero}, empty_computation));
HloInstruction* call1 = entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {zero}, empty2_computation));
entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {call1}, empty_computation));
auto computation = module->AddEntryComputation(entry.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(computation->root_instruction(), op::Constant());
}
TEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {
const Shape f32 = ShapeUtil::MakeShape(F32, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder outfeeder(TestName() + ".outfeeder");
auto value = outfeeder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());
outfeeder.AddInstruction(
HloInstruction::CreateOutfeed(f32, value, token, ""));
auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());
HloComputation::Builder outer(TestName() + ".outer");
outer.AddInstruction(HloInstruction::CreateCall(
outfeed_computation->root_instruction()->shape(), {},
outfeed_computation));
module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
}
TEST_F(CallInlinerTest, InlineSingleUseCalleesOnly) {
const absl::string_view hlo_string = R"(
HloModule inline_module
a {
ROOT tuple = () tuple()
}
b {
ROOT tuple.1 = () tuple()
}
ENTRY inline {
a = () call(), to_apply=a
b = () call(), to_apply=a
c = () call(), to_apply=b
ROOT tuple = ((), (), ()) tuple(a, b, c)
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CallInliner call_inliner(true);
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
ASSERT_EQ(module->entry_computation()->instruction_count(), 4);
auto inst = module->entry_computation()->instructions().begin();
EXPECT_THAT(*inst, op::Call());
++inst;
EXPECT_THAT(*inst, op::Call());
++inst;
EXPECT_THAT(*inst, op::Tuple());
++inst;
EXPECT_THAT(*inst, op::Tuple());
}
TEST_F(CallInlinerTest, InliningPerformedInsideSpecifiedThreadsOnly) {
const std::string hlo_string = R"(
HloModule inline_specified_threads_only
%secondary_inner () -> u32[] {
ROOT %co.2 = u32[] constant(2)
}, execution_thread="secondary_thread"
%secondary_outer () -> u32[] {
%co.1 = u32[] constant(1)
%call.1 = u32[] call(), to_apply=%secondary_inner
ROOT %add.1 = add(%co.1, %call.1)
}, execution_thread="secondary_thread"
%main_inner () -> u32[] {
%co.0 = u32[] constant(0)
%async-start = ((), u32[], u32[]) call-start(), async_execution_thread="secondary_thread", to_apply=secondary_outer
%async-done = u32[] call-done(((), u32[], u32[]) %async-start)
ROOT %add.2 = add(%co.0, %async-done)
}
ENTRY %main_outer (p0: u32[]) -> u32[] {
%p.0 = u32[] parameter(0)
%call.0 = u32[] call(), to_apply=%main_inner
ROOT %add.3 = add(%p.0, %call.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto module_clone = module->Clone("");
{
VLOG(1) << "Module BEFORE CallInliner\n" << module->ToString();
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
VLOG(1) << "Module AFTER CallInliner\n" << module->ToString();
EXPECT_TRUE(mutated);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
op::AsyncDone())));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->operand(1)
->operand(1)
->async_wrapped_instruction()
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
}
VLOG(1) << "Restricting CallInliner to the secondary thread.";
{
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(
bool mutated,
call_inliner.Run(module_clone.get(), {"secondary_thread"}));
VLOG(1) << "Module AFTER CallInliner\n" << module_clone->ToString();
EXPECT_TRUE(mutated);
EXPECT_THAT(module_clone->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Call()));
EXPECT_THAT(module_clone->entry_computation()
->root_instruction()
->operand(1)
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
op::AsyncDone()));
EXPECT_THAT(module_clone->entry_computation()
->root_instruction()
->operand(1)
->called_computations()
.at(0)
->root_instruction()
->operand(1)
->async_wrapped_instruction()
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
}
}
TEST_F(CallInlinerTest, InlineCompositeCall) {
const absl::string_view hlo_string = R"(
HloModule composite
%add (lhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] constant(2)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %main () -> f32[] {
%lhs = f32[] constant(42)
ROOT %call = f32[] call(f32[] %lhs), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1"}
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CallInliner call_inliner(true);
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
ASSERT_EQ(module->entry_computation()->instruction_count(), 3);
auto inst = module->entry_computation()->instructions().begin();
EXPECT_THAT(*inst, op::Constant());
++inst;
EXPECT_THAT(*inst, op::Constant());
++inst;
EXPECT_THAT(*inst, op::Add());
EXPECT_TRUE((*inst)->frontend_attributes().map().empty());
}
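// The next three tests cover the Shardy path (use_shardy_partitioner=true):
// computations whose names contain "shmap_body" or start with the
// manual-computation prefix stay outlined, while a name that only embeds
// the prefix after other text is still inlined.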
TEST_F(CallInlinerTest, UseShardyMhloToHloShmapBodyNotInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%prefix_shmap_body_suffix.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.2 = f32[8,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="Sharding", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=3}
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_shmap_body_suffix.4
%custom-call.8 = f32[1,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="Sharding", sharding={manual}, metadata={source_file="-" source_line=6}
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %custom-call.8), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_FALSE(changed);
HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
EXPECT_NE(call, nullptr);
EXPECT_TRUE(call->has_to_apply());
EXPECT_EQ(call->to_apply()->name(), "prefix_shmap_body_suffix.4");
}
TEST_F(CallInlinerTest, UseShardManualComputationBodyNotInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%xla.sdy.manual_computation_body.4
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_FALSE(changed);
HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
EXPECT_NE(call, nullptr);
EXPECT_TRUE(call->has_to_apply());
EXPECT_EQ(call->to_apply()->name(), "xla.sdy.manual_computation_body.4");
}
TEST_F(CallInlinerTest, UseShardManualComputationBodyInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%prefix_xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_xla.sdy.manual_computation_body.4
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
164bcdca-2f59-413e-98d0-a4c8cf57007b | cpp | tensorflow/tensorflow | reshape_decomposer | third_party/xla/xla/service/reshape_decomposer.cc | third_party/xla/xla/service/reshape_decomposer_test.cc | #include "xla/service/reshape_decomposer.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
class ReshapeDecomposerVisitor : public DfsHloRewriteVisitor {
public:
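  // Rewrites a reshape into at most copy + bitcast + copy:
  //  * if the reshape is already a bitcast, emit a bitcast;
  //  * if the operand layout can be aligned to the result, physically
  //    transpose (copy) the operand and then bitcast;
  //  * if the result layout can be aligned to the operand, bitcast and then
  //    physically transpose the result;
  //  * otherwise normalize both sides to a descending layout, giving two
  //    copies around a bitcast.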
absl::Status HandleReshape(HloInstruction* reshape) override {
HloInstruction* operand = reshape->mutable_operand(0);
auto s = reshape->shape();
auto s0 = operand->shape();
if (ShapeUtil::ReshapeIsBitcast(s, s0)) {
auto b = MakeBitcastHlo(operand, s, &operand->metadata());
return ReplaceInstruction(reshape, b);
} else if (auto output_aligned_input_shape =
ShapeUtil::AlignLayouts(s, s0)) {
Shape new_input_shape = *output_aligned_input_shape;
HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape);
VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
"transpose on the operand: "
<< copied_operand->ToString();
auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata());
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b));
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
} else if (auto input_aligned_output_shape =
ShapeUtil::AlignLayouts(s0, s)) {
Shape new_output_shape = *input_aligned_output_shape;
auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata());
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
HloInstruction* copied_result = MakeCopyHlo(b, s);
VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
"transposition on the result: "
<< copied_result->ToString();
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result));
} else {
VLOG(3) << "Both input and output of reshape are not alignable, create "
"two physical transposes";
auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
s0.element_type(), s0.dimensions());
auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized);
auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
s.element_type(), s.dimensions());
auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata());
DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
auto c2 = MakeCopyHlo(b, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2));
}
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> ReshapeDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return ReshapeDecomposerVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/reshape_decomposer.h"
#include <memory>
#include <optional>
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ReshapeDecomposerTest : public HloTestBase {
public:
void CheckReshapeDecomposer(const char* hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo, ReshapeDecomposer{}, expected,
[&](HloModule* module) {
EXPECT_TRUE(absl::c_all_of(
module->entry_computation()->instructions(),
[&](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kReshape ||
ShapeUtil::ReshapeIsBitcast(instr->operand(0)->shape(),
instr->shape());
}));
});
}
};
TEST_F(ReshapeDecomposerTest, IsBitcast) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[8]{0} parameter(0)
ROOT r = f32[4,2]{1,0} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, AlignableOutput) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[8,3]{1,0} parameter(0)
ROOT r = f32[4,2,3]{0,1,2} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, AlignableInput) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2,3]{0,1,2} parameter(0)
ROOT r = f32[8,3]{1,0} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
TEST_F(ReshapeDecomposerTest, NotAlignable) {
const char* hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2,3,8]{0,2,1,3} parameter(0)
ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p)
}
)";
CheckReshapeDecomposer(hlo, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
872adbcd-9a42-46b0-9278-91c217a8afec | cpp | tensorflow/tensorflow | indexed_array_analysis | third_party/xla/xla/service/indexed_array_analysis.cc | third_party/xla/xla/service/indexed_array_analysis_test.cc | #include "xla/service/indexed_array_analysis.h"
#include <algorithm>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Analysis = IndexedArrayAnalysis;
using UnknownArray = Analysis::UnknownArray;
using ConstantArray = Analysis::ConstantArray;
using ReshapedArray = Analysis::ReshapedArray;
using ScalarIndexedArray = Analysis::ScalarIndexedArray;
using absl::StrJoin;
}
std::string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {
switch (root->kind()) {
case Array::kUnknown: {
auto* unknown_tensor = root->as<UnknownArray>();
return absl::StrCat("%", unknown_tensor->instruction().name());
}
case Array::kConstant: {
if (print_constants) {
std::string contents = root->as<ConstantArray>()->literal()->ToString();
return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
" ", contents, ")");
}
return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
")");
}
case Array::kReshaped: {
ReshapedArray* reshaped_array = root->as<ReshapedArray>();
return absl::StrCat(
"(reshape ", ToString(reshaped_array->operand(), print_constants),
" to ", ShapeUtil::HumanString(reshaped_array->shape()), ")");
}
case Array::kScalarIndexedConstant:
case Array::kScalarIndexed: {
auto* indexed_array = root->as<ScalarIndexedArray>();
std::string name = root->kind() == Array::kScalarIndexedConstant
? "scalar-indexed-const"
: "scalar-indexed";
return absl::StrCat(
"(", name, " ", ToString(indexed_array->source(), print_constants),
" ", ToString(indexed_array->indices(), print_constants), " ",
indexed_array->source_dim(), "->[",
StrJoin(indexed_array->output_dims(), ","), "])");
}
}
}
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::GetArrayFor(
const HloInstruction* instr) {
auto it = cache_.find(instr);
if (it != cache_.end()) {
return it->second;
}
TF_RETURN_IF_ERROR(TraverseAndPopulateCache(instr));
return FindOrDie(cache_, instr);
}
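// Populates cache_ for `root` and everything it transitively depends on,
// using an explicit DFS stack so that deep graphs do not overflow the call
// stack.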
absl::Status IndexedArrayAnalysis::TraverseAndPopulateCache(
const HloInstruction* root) {
absl::InlinedVector<const HloInstruction*, 4> stack;
enum DfsState { kDiscovered, kVisited };
absl::flat_hash_map<const HloInstruction*, DfsState> dfs_state_map;
stack.push_back(root);
InsertOrDie(&dfs_state_map, root, kDiscovered);
do {
const HloInstruction* instr = stack.back();
if (cache_.contains(instr)) {
stack.pop_back();
continue;
}
switch (FindOrDie(dfs_state_map, instr)) {
case kDiscovered: {
for (const HloInstruction* operand : instr->operands()) {
if (!cache_.contains(operand)) {
stack.push_back(operand);
CHECK(!dfs_state_map.contains(operand) ||
dfs_state_map[operand] == kDiscovered);
dfs_state_map[operand] = kDiscovered;
}
}
dfs_state_map[instr] = kVisited;
break;
}
case kVisited:
stack.pop_back();
TF_ASSIGN_OR_RETURN(Array * array, ComputeArrayFor(instr));
InsertOrDie(&cache_, instr, array);
break;
}
} while (!stack.empty());
return absl::OkStatus();
}
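// Dispatches on the instruction kind; anything the analysis cannot model is
// represented by an UnknownArray leaf.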
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayFor(
const HloInstruction* instr) {
Array* computed_array;
if (instr->IsElementwise() && instr->operand_count() == 1) {
TF_ASSIGN_OR_RETURN(
computed_array,
ComputeArrayForElementwiseUnaryOp(
instr->opcode(), FindOrDie(cache_, instr->operand(0))));
} else if (instr->IsElementwise() && instr->operand_count() == 2) {
TF_ASSIGN_OR_RETURN(
computed_array,
ComputeArrayForElementwiseBinaryOp(
instr->opcode(), FindOrDie(cache_, instr->operand(0)),
FindOrDie(cache_, instr->operand(1))));
} else if (instr->opcode() == HloOpcode::kConstant) {
TF_ASSIGN_OR_RETURN(computed_array,
ComputeArrayForConstant(instr->literal()));
} else if (instr->opcode() == HloOpcode::kGather) {
TF_ASSIGN_OR_RETURN(
computed_array,
ComputeArrayForGather(instr->shape(), instr->gather_dimension_numbers(),
instr->gather_slice_sizes(),
FindOrDie(cache_, instr->operand(0)),
FindOrDie(cache_, instr->operand(1))));
} else if (instr->opcode() == HloOpcode::kReshape) {
TF_ASSIGN_OR_RETURN(
computed_array,
ComputeArrayForReshape(instr->shape(),
FindOrDie(cache_, instr->operand(0))));
} else if (instr->opcode() == HloOpcode::kDot) {
TF_ASSIGN_OR_RETURN(
computed_array,
ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),
instr->precision_config(),
FindOrDie(cache_, instr->operand(0)),
FindOrDie(cache_, instr->operand(1))));
} else {
computed_array = nullptr;
}
if (!computed_array) {
computed_array = Construct<UnknownArray>(instr);
}
return computed_array;
}
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForConstant(
const Literal& literal) {
return Construct<ConstantArray>(&literal);
}
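// Folds a gather of a gather into a single gather over a composed index
// array, conceptually G(G(A, X), Y) => G(A, G(X, Y)). `simulated_index`
// records, per dimension of the result, whether it is untouched or produced
// by the first or second gather; the dimension numbers for the inner
// (index-composing) gather and the outer gather are derived from it.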
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
ScalarIndexedArray* source, Array* indices, int64_t source_dim,
absl::Span<const int64_t> output_dims, Shape shape) {
Array* a = source->source();
Array* x = source->indices();
Array* y = indices;
enum class IndexComponent { Ungathered, GatheredFirst, GatheredSecond };
std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(),
IndexComponent::Ungathered);
EraseAt(&simulated_index, source->source_dim());
for (int64_t gather_dim : source->output_dims()) {
simulated_index.insert(simulated_index.begin() + gather_dim,
IndexComponent::GatheredFirst);
}
EraseAt(&simulated_index, source_dim);
for (int64_t output_dim : output_dims) {
simulated_index.insert(simulated_index.begin() + output_dim,
IndexComponent::GatheredSecond);
}
int64_t source_dim_for_index_array =
FindIndex(source->output_dims(), source_dim);
CHECK_NE(source_dim_for_index_array, source->output_dims().size());
std::vector<int64_t> output_dims_for_index_array;
int64_t gathered_index_components_seen = 0;
for (IndexComponent simulation_dim : simulated_index) {
if (simulation_dim == IndexComponent::GatheredSecond) {
output_dims_for_index_array.push_back(gathered_index_components_seen);
}
if (simulation_dim != IndexComponent::Ungathered) {
gathered_index_components_seen++;
}
}
std::vector<int64_t> dim_sizes_for_composed_index;
std::vector<int64_t> output_dims_for_new_gather;
for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {
if (simulated_index[i] != IndexComponent::Ungathered) {
dim_sizes_for_composed_index.push_back(shape.dimensions(i));
output_dims_for_new_gather.push_back(i);
}
}
Array* inner_indices = ConstructScalarIndexedArray(
x, y, source_dim_for_index_array, output_dims_for_index_array,
ShapeUtil::MakeShape(x->shape().element_type(),
dim_sizes_for_composed_index));
return ConstructScalarIndexedArray(a, inner_indices, source->source_dim(),
output_dims_for_new_gather,
std::move(shape));
}
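// Only gathers with scalar indices that select whole slices along the single
// start_index_map dimension can be modeled; for anything else nullptr is
// returned and the instruction stays opaque to the analysis.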
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
const Shape& shape, const GatherDimensionNumbers& dim_numbers,
absl::Span<const int64_t> slice_sizes, Array* source, Array* indices) {
if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {
VLOG(3) << "ComputeArrayForGather: indices are not scalar";
return nullptr;
}
CHECK_EQ(dim_numbers.start_index_map_size(), 1);
if (dim_numbers.collapsed_slice_dims_size() != 1 ||
dim_numbers.collapsed_slice_dims(0) != dim_numbers.start_index_map(0)) {
VLOG(3) << "ComputeArrayForGather: gather operations must elide "
"start_index_map[0] and "
"start_index_map[0] only";
return nullptr;
}
for (int64_t i = 0, e = source->shape().dimensions_size(); i < e; i++) {
if (i != dim_numbers.collapsed_slice_dims(0) &&
source->shape().dimensions(i) != slice_sizes[i]) {
VLOG(3) << "ComputeArrayForGather: slice_sizes[" << i
<< "] != source->shape().dimensions(" << i << ") -- "
<< source->shape().dimensions(i) << " vs. " << slice_sizes[i]
<< " with dim_numbers.collapsed_slice_dims(0) = "
<< dim_numbers.collapsed_slice_dims(0);
return nullptr;
}
}
int64_t source_dim = dim_numbers.start_index_map(0);
std::vector<int64_t> output_dims;
for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
output_dims.push_back(i);
}
}
if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
if (absl::c_linear_search(indexed->output_dims(), source_dim)) {
return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
shape);
}
} else if (auto* constant = dynamic_cast<ConstantArray*>(source)) {
return Construct<ScalarIndexedConstantArray>(constant, indices, source_dim,
output_dims, shape);
}
return Construct<ScalarIndexedArray>(source, indices, source_dim, output_dims,
shape);
}
namespace {
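// Returns the smallest index i such that the product of values[i..] equals
// `product`, or -1 if no suffix has that product.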
int64_t FindSuffixWithProduct(absl::Span<const int64_t> values,
int64_t product) {
DCHECK(absl::c_all_of(values, [](int64_t value) { return value > 0; }));
int64_t current_product = 1;
int64_t i;
for (i = values.size() - 1; i >= 0 && product > current_product; --i) {
current_product *= values[i];
}
if (product == current_product) {
return i + 1;
}
return -1;
}
struct ReshapePassthroughDimPair {
int64_t result_dim;
int64_t operand_dim;
};
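// Computes the "passthrough" dimension pairs for a reshape from
// `operand_shape` to `result_shape`: pairs (result_dim, operand_dim) where
// the reshape carries the operand dimension over to the result dimension
// unchanged, because the subarrays to the right of each contain the same
// number of elements.  The returned pairs are sorted by both fields.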
std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
absl::Span<const int64_t> operand_shape,
absl::Span<const int64_t> result_shape) {
std::vector<ReshapePassthroughDimPair> result;
int64_t result_subarray_size = 1;
for (int64_t result_dim = result_shape.size() - 1; result_dim >= 0;
--result_dim) {
int64_t candidate_operand_dim =
FindSuffixWithProduct(operand_shape, result_subarray_size);
CHECK_NE(candidate_operand_dim, 0)
<< "result_dim = " << result_dim
<< ", result_subarray_size = " << result_subarray_size
<< ", result_shape = [" << StrJoin(result_shape, ",") << "]"
<< ", operand_shape = [" << StrJoin(operand_shape, ",") << "]";
if (candidate_operand_dim != -1 &&
result_shape[result_dim] == operand_shape[candidate_operand_dim - 1]) {
result.push_back({result_dim,
candidate_operand_dim - 1});
}
result_subarray_size *= result_shape[result_dim];
}
absl::c_reverse(result);
if (VLOG_IS_ON(3)) {
std::vector<std::string> result_strings;
absl::c_transform(result, std::back_inserter(result_strings),
[](ReshapePassthroughDimPair value) {
return absl::StrCat(value.result_dim, "->",
value.operand_dim);
});
VLOG(3) << "For a reshape from [" << StrJoin(operand_shape, ",") << "] to ["
<< StrJoin(result_shape, ",") << "] passthrough indices are ["
<< StrJoin(result_strings, ",")
<< "] (legend: `result`->`operand`)";
}
DCHECK(absl::c_is_sorted(
result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
return lhs.result_dim < rhs.result_dim;
}));
DCHECK(absl::c_is_sorted(
result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
return lhs.operand_dim < rhs.operand_dim;
}));
return result;
}
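// Returns true if `dim` occurs as the operand dimension of some pair in
// `passthrough_dims`.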
bool IsReshapePassthroughOperandDim(
absl::Span<const ReshapePassthroughDimPair> passthrough_dims, int64_t dim) {
return absl::c_any_of(passthrough_dims,
[&](ReshapePassthroughDimPair passthrough_dim_pair) {
return passthrough_dim_pair.operand_dim == dim;
});
}
int64_t MapPassthroughOperandDimToResultDim(
absl::Span<const ReshapePassthroughDimPair> passthrough_dims,
int64_t operand_dim) {
auto it = absl::c_find_if(
passthrough_dims, [&](ReshapePassthroughDimPair passthrough_dim_pair) {
return passthrough_dim_pair.operand_dim == operand_dim;
});
CHECK(it != passthrough_dims.end());
return it->result_dim;
}
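// Finds the position in `result_shape` at which a dimension with the same
// trailing subarray size as `source_passthrough_dim` (a dimension of
// `operand_shape`) can be inserted, or -1 if no suffix of `result_shape` has
// that many elements.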
int64_t FindSourcePositionForPassthroughResultDim(
absl::Span<const int64_t> operand_shape,
absl::Span<const int64_t> result_shape, int64_t source_passthrough_dim) {
VLOG(3) << "FindSourcePositionForPassthroughResultDim(["
<< StrJoin(operand_shape, ",") << "], [" << StrJoin(result_shape, ",")
<< "], " << source_passthrough_dim << ")";
int64_t indexed_source_subarray_size =
std::accumulate(operand_shape.begin() + source_passthrough_dim + 1,
operand_shape.end(), 1LL, std::multiplies<int64_t>());
return FindSuffixWithProduct(result_shape, indexed_source_subarray_size);
}
Shape StripDegenerateDimensions(const Shape& shape) {
DimensionVector new_dims;
absl::c_copy_if(shape.dimensions(), std::back_inserter(new_dims),
[](int64_t dim) { return dim != 1; });
return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
}
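// Rewrites `operand` into an equivalent ScalarIndexedArray whose shape has no
// degenerate (size-1) dimensions, reshaping the source and indices to match
// and remapping source_dim and output_dims.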
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToRemoveDegenerateDims(
ScalarIndexedArray* operand) {
const Shape& shape = operand->shape();
if (!ShapeUtil::HasDegenerateDimensions(shape)) {
return operand;
}
const Shape& source_shape = operand->source()->shape();
DimensionVector new_source_shape_dims;
for (int64_t i = 0, e = source_shape.dimensions_size(); i < e; i++) {
if (i == operand->source_dim() || source_shape.dimensions(i) != 1) {
new_source_shape_dims.push_back(source_shape.dimensions(i));
}
}
Shape new_source_shape =
ShapeUtil::MakeShape(shape.element_type(), new_source_shape_dims);
Shape new_indices_shape =
StripDegenerateDimensions(operand->indices()->shape());
TF_ASSIGN_OR_RETURN(
Array* const new_source,
ComputeArrayForReshape(new_source_shape, operand->source()));
TF_ASSIGN_OR_RETURN(
Array* const new_indices,
ComputeArrayForReshape(new_indices_shape, operand->indices()));
DimensionVector new_output_dims;
int64_t degenerate_dims_seen = 0;
for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
if (shape.dimensions(i) == 1) {
degenerate_dims_seen++;
} else if (absl::c_linear_search(operand->output_dims(), i)) {
new_output_dims.push_back(i - degenerate_dims_seen);
}
}
int64_t degenerate_dims_before_source_dim =
std::count(source_shape.dimensions().begin(),
source_shape.dimensions().begin() + operand->source_dim(), 1);
int64_t new_source_dim =
operand->source_dim() - degenerate_dims_before_source_dim;
return ConstructScalarIndexedArray(
new_source, new_indices, new_source_dim,
InlinedVectorToVector(new_output_dims),
StripDegenerateDimensions(operand->shape()));
}
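// The inverse of ReshapeToRemoveDegenerateDims: inserts size-1 dimensions at
// the positions listed in `degenerate_dims` into the shape of `operand`,
// which must not already contain degenerate dimensions.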
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToAddDegenerateDims(
ScalarIndexedArray* operand, absl::Span<const int64_t> degenerate_dims) {
if (degenerate_dims.empty()) {
return operand;
}
CHECK(!ShapeUtil::HasDegenerateDimensions(operand->shape()));
DimensionVector new_output_dims = [&]() {
absl::InlinedVector<bool, 6> output_dims_bitvector(
operand->shape().dimensions_size());
for (int64_t output_dim : operand->output_dims()) {
output_dims_bitvector[output_dim] = true;
}
for (int64_t degenerate_dim : degenerate_dims) {
InsertAt(&output_dims_bitvector, degenerate_dim, false);
}
DimensionVector result;
result.reserve(operand->output_dims().size());
for (int64_t i = 0, e = output_dims_bitvector.size(); i < e; i++) {
if (output_dims_bitvector[i]) {
result.push_back(i);
}
}
return result;
}();
DimensionVector new_result_shape_dims;
absl::c_copy(operand->shape().dimensions(),
std::back_inserter(new_result_shape_dims));
for (int64_t degenerate_dim : degenerate_dims) {
InsertAt(&new_result_shape_dims, degenerate_dim, 1);
}
DimensionVector new_source_shape_dims = new_result_shape_dims;
for (int64_t output_dim : new_output_dims) {
EraseAt(&new_source_shape_dims, output_dim);
}
  int64_t new_source_dim = [&]() {
    // Return the position in new_source_shape_dims whose count of preceding
    // non-degenerate dimensions equals the old source_dim.
    int64_t non_degenerate_dims_seen = 0;
    for (int64_t i = 0, e = new_source_shape_dims.size(); i < e; i++) {
      if (non_degenerate_dims_seen == operand->source_dim()) {
        return i;
      }
      if (new_source_shape_dims[i] != 1) {
        non_degenerate_dims_seen++;
      }
    }
    LOG(FATAL) << "Did not find source dim in " << ToString(operand);
  }();
int64_t source_dim_size =
operand->source()->shape().dimensions(operand->source_dim());
InsertAt(&new_source_shape_dims, new_source_dim,
source_dim_size);
Shape new_source_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
new_source_shape_dims);
Shape new_result_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
new_result_shape_dims);
TF_ASSIGN_OR_RETURN(
Array* const new_source,
ComputeArrayForReshape(new_source_shape, operand->source()));
return ConstructScalarIndexedArray(
new_source, operand->indices(), new_source_dim,
InlinedVectorToVector(new_output_dims), new_result_shape);
}
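// Tries to fold a reshape of a gather of a constant into a gather of a
// reshaped constant.  Degenerate dimensions are first stripped from both the
// operand and the target shape, the degenerate-free case is folded, and the
// degenerate result dimensions are added back at the end.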
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldReshapeOfGather(
const Shape& shape, ScalarIndexedConstantArray* operand) {
VLOG(3) << "FoldReshapeOfGather(" << ToString(operand) << ")";
TF_ASSIGN_OR_RETURN(ScalarIndexedArray* const operand_without_degenerate_dims,
ReshapeToRemoveDegenerateDims(operand));
Shape output_shape_without_degenerate_dims = StripDegenerateDimensions(shape);
TF_ASSIGN_OR_RETURN(
ScalarIndexedArray* const folded_reshape_without_degenerate_dims,
FoldReshapeOfGatherNoDegenerateDims(
output_shape_without_degenerate_dims,
operand_without_degenerate_dims->as<ScalarIndexedConstantArray>()));
if (folded_reshape_without_degenerate_dims == nullptr) {
return nullptr;
}
DimensionVector degenerate_result_dims;
for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
if (shape.dimensions(i) == 1) {
degenerate_result_dims.push_back(i);
}
}
return ReshapeToAddDegenerateDims(folded_reshape_without_degenerate_dims,
degenerate_result_dims);
}
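// Folds reshape(gather) when neither shape has degenerate dimensions.  The
// fold only applies if every output dimension of `scalar_indexed` passes
// through the reshape unchanged; the constant source is then reshaped so that
// the original indices pick out the same elements in the new layout.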
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
const Shape& shape, ScalarIndexedConstantArray* scalar_indexed) {
VLOG(3) << "FoldReshapeOfGatherNoDegenerateDims(" << ToString(scalar_indexed)
<< ")";
CHECK(!ShapeUtil::HasDegenerateDimensions(shape));
CHECK(!ShapeUtil::HasDegenerateDimensions(scalar_indexed->shape()));
std::vector<ReshapePassthroughDimPair> reshape_passthrough_dims =
ComputeReshapePassthroughDimPairs(
scalar_indexed->shape().dimensions(),
shape.dimensions());
auto is_reshape_passthrough_operand_dim = [&](int64_t operand_dim) {
return IsReshapePassthroughOperandDim(reshape_passthrough_dims,
operand_dim);
};
if (!absl::c_all_of(scalar_indexed->output_dims(),
is_reshape_passthrough_operand_dim)) {
VLOG(3) << "Not all output dims are passthrough dims "
<< ToString(scalar_indexed);
return nullptr;
}
std::vector<int64_t> new_scalar_indexed_source_shape(
shape.dimensions().begin(), shape.dimensions().end());
for (int64_t i = scalar_indexed->output_dims().size() - 1; i >= 0; i--) {
int64_t output_dim = scalar_indexed->output_dims()[i];
int64_t output_dim_after_reshape = MapPassthroughOperandDimToResultDim(
reshape_passthrough_dims, output_dim);
EraseAt(&new_scalar_indexed_source_shape, output_dim_after_reshape);
}
const Shape& scalar_indexed_source_shape = scalar_indexed->source()->shape();
int64_t source_dim_for_new_scalar_indexed_node =
FindSourcePositionForPassthroughResultDim(
scalar_indexed_source_shape.dimensions(),
new_scalar_indexed_source_shape,
scalar_indexed->source_dim());
if (source_dim_for_new_scalar_indexed_node == -1) {
VLOG(3) << "Could not compute the source dim for the new scalar indexed "
"node: scalar_indexed_source_shape = ["
<< StrJoin(scalar_indexed_source_shape.dimensions(), ",")
<< "] and new_scalar_indexed_source_shape = ["
<< StrJoin(new_scalar_indexed_source_shape, ",") << "]";
return nullptr;
}
InsertAt(
&new_scalar_indexed_source_shape, source_dim_for_new_scalar_indexed_node,
scalar_indexed_source_shape.dimensions(scalar_indexed->source_dim()));
CHECK_EQ(absl::c_accumulate(new_scalar_indexed_source_shape, 1LL,
std::multiplies<int64_t>()),
ShapeUtil::ElementsIn(scalar_indexed_source_shape));
CHECK(IsReshapePassthroughOperandDim(
ComputeReshapePassthroughDimPairs(
scalar_indexed_source_shape.dimensions(),
new_scalar_indexed_source_shape),
scalar_indexed->source_dim()));
auto map_passthrough_operand_dim_to_result_dim = [&](int64_t result_dim) {
return MapPassthroughOperandDimToResultDim(reshape_passthrough_dims,
result_dim);
};
std::vector<int64_t> output_dims_for_new_scalar_indexed_node;
absl::c_transform(scalar_indexed->output_dims(),
std::back_inserter(output_dims_for_new_scalar_indexed_node),
map_passthrough_operand_dim_to_result_dim);
TF_ASSIGN_OR_RETURN(const Literal* new_scalar_indexed_source_literal,
TakeOwnership(scalar_indexed->literal().Reshape(
new_scalar_indexed_source_shape)));
TF_ASSIGN_OR_RETURN(
Array * new_scalar_indexed_source,
ComputeArrayForConstant(*new_scalar_indexed_source_literal));
return ConstructScalarIndexedArray(
new_scalar_indexed_source, scalar_indexed->indices(),
source_dim_for_new_scalar_indexed_node,
output_dims_for_new_scalar_indexed_node, shape);
}
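// Computes the Array for a reshape: reshapes of scalar-indexed constants are
// folded into the gather when possible, reshapes of constants are evaluated
// eagerly, and everything else is wrapped in a ReshapedArray.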
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForReshape(
const Shape& shape, Array* operand) {
if (ShapeUtil::Compatible(operand->shape(), shape)) {
return operand;
}
if (auto* scalar_indexed =
dynamic_cast<ScalarIndexedConstantArray*>(operand)) {
TF_ASSIGN_OR_RETURN(Analysis::Array * reshape_folded_into_gather,
FoldReshapeOfGather(shape, scalar_indexed));
if (reshape_folded_into_gather) {
return reshape_folded_into_gather;
}
}
if (auto* constant_array = dynamic_cast<ConstantArray*>(operand)) {
TF_ASSIGN_OR_RETURN(
Literal* const new_literal,
TakeOwnership(constant_array->literal()->Reshape(shape.dimensions())));
return Construct<ConstantArray>(new_literal);
}
return Construct<ReshapedArray>(operand, shape);
}
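// Folds an elementwise binary op between a scalar-indexed constant and a
// broadcast of a constant into a new scalar-indexed constant: the broadcast
// is replayed onto the gather source, the op is evaluated on the two
// constants, and the original indices are reused.  Returns nullptr if the
// pattern does not match, e.g. when some gathered output dimension is not
// produced by the broadcast.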
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
Array* lhs,
Array* rhs) {
ScalarIndexedConstantArray* lhs_scalar_indexed_const =
dynamic_cast<ScalarIndexedConstantArray*>(lhs);
ScalarIndexedConstantArray* rhs_scalar_indexed_const =
dynamic_cast<ScalarIndexedConstantArray*>(rhs);
bool lhs_is_indexed;
if (lhs_scalar_indexed_const && !rhs_scalar_indexed_const) {
lhs_is_indexed = true;
} else if (rhs_scalar_indexed_const && !lhs_scalar_indexed_const) {
lhs_is_indexed = false;
} else {
return nullptr;
}
ScalarIndexedConstantArray* scalar_indexed_const =
lhs_is_indexed ? lhs_scalar_indexed_const : rhs_scalar_indexed_const;
UnknownArray* candidate_broadcast_array =
dynamic_cast<UnknownArray*>(lhs_is_indexed ? rhs : lhs);
if (!candidate_broadcast_array ||
candidate_broadcast_array->instruction().opcode() !=
HloOpcode::kBroadcast) {
return nullptr;
}
const HloInstruction* broadcast_instr =
&candidate_broadcast_array->instruction();
const HloInstruction* broadcast_const_operand = broadcast_instr->operand(0);
if (broadcast_const_operand->opcode() != HloOpcode::kConstant) {
return nullptr;
}
absl::Span<const int64_t> broadcast_dims = broadcast_instr->dimensions();
auto is_broadcasted_dim = [&](int64_t output_dim) {
return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();
};
if (!absl::c_all_of(scalar_indexed_const->output_dims(),
is_broadcasted_dim)) {
return nullptr;
}
enum class IndexComponent { Broadcasted, NotBroadcasted };
std::vector<IndexComponent> simulated_index(
broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted);
for (int64_t broadcast_dim : broadcast_dims) {
simulated_index[broadcast_dim] = IndexComponent::NotBroadcasted;
}
absl::Span<const int64_t> output_dims = scalar_indexed_const->output_dims();
for (int64_t i = output_dims.size() - 1; i >= 0; --i) {
CHECK(simulated_index[output_dims[i]] == IndexComponent::Broadcasted);
EraseAt(&simulated_index, output_dims[i]);
}
InsertAt(&simulated_index, scalar_indexed_const->source_dim(),
IndexComponent::Broadcasted);
std::vector<int64_t> new_inner_broadcast_dims;
  for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {
if (simulated_index[i] == IndexComponent::NotBroadcasted) {
new_inner_broadcast_dims.push_back(i);
}
}
TF_ASSIGN_OR_RETURN(
Literal inner_broadcast_result,
broadcast_const_operand->literal().Broadcast(
scalar_indexed_const->source()->shape(), new_inner_broadcast_dims));
const Literal* literal_for_new_source;
if (lhs_is_indexed) {
TF_ASSIGN_OR_RETURN(
literal_for_new_source,
TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
opcode, scalar_indexed_const->literal(), inner_broadcast_result)));
} else {
TF_ASSIGN_OR_RETURN(
literal_for_new_source,
TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
opcode, inner_broadcast_result, scalar_indexed_const->literal())));
}
ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
return Construct<ScalarIndexedConstantArray>(
new_source, scalar_indexed_const->indices(),
scalar_indexed_const->source_dim(),
std::vector<int64_t>(scalar_indexed_const->output_dims().begin(),
scalar_indexed_const->output_dims().end()),
scalar_indexed_const->shape());
}
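// Folds an elementwise unary op applied to a scalar-indexed constant by
// evaluating the op on the constant source and reusing the original indices.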
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,
Array* operand) {
auto* scalar_indexed_const =
dynamic_cast<ScalarIndexedConstantArray*>(operand);
if (scalar_indexed_const == nullptr) {
return nullptr;
}
TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,
TakeOwnership(HloEvaluator{}.EvaluateElementwiseUnaryOp(
opcode, scalar_indexed_const->literal())));
ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
return Construct<ScalarIndexedConstantArray>(
new_source, scalar_indexed_const->indices(),
scalar_indexed_const->source_dim(),
SpanToVector(scalar_indexed_const->output_dims()),
scalar_indexed_const->shape());
}
namespace {
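// Returns the unique dimension in [0, rank) that is neither contracting nor
// batch, or nullopt if there is not exactly one such dimension.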
std::optional<int64_t> GetOnlyNonContractingNonBatchDim(
int64_t rank, absl::Span<const int64_t> contracting_dims,
absl::Span<const int64_t> batch_dims) {
std::optional<int64_t> result;
for (int64_t dim = 0; dim < rank; dim++) {
if (!absl::c_linear_search(contracting_dims, dim) &&
!absl::c_linear_search(batch_dims, dim)) {
if (result.has_value()) {
return std::nullopt;
}
result = dim;
}
}
return result;
}
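// Checks whether a dot involving `indexed_array` can be folded through the
// gather: the array's single output dimension must be the unique
// non-contracting non-batch dimension, and the gather's source dimension must
// be one of the two minor-most dimensions so the rewritten dot is still a
// matmul.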
bool CanFoldDotIntoIndexedArray(
absl::string_view tag, Analysis::ScalarIndexedConstantArray* indexed_array,
absl::Span<const int64_t> contracting_dims,
absl::Span<const int64_t> batch_dims) {
std::optional<int64_t> non_contracting_non_batch_dim =
GetOnlyNonContractingNonBatchDim(indexed_array->shape().rank(),
contracting_dims, batch_dims);
if (!non_contracting_non_batch_dim.has_value()) {
VLOG(3) << tag << ": multiple or no non-contracting non-batch dimensions";
return false;
}
if (indexed_array->output_dims().size() != 1 ||
indexed_array->output_dims()[0] != *non_contracting_non_batch_dim) {
VLOG(3) << tag << ": output dims != the lhs non-contracting non-batch dim";
return false;
}
int64_t indexed_array_rank = indexed_array->shape().rank();
if (indexed_array->source_dim() < (indexed_array_rank - 2)) {
VLOG(3) << tag
<< ": source dim is not in the low two dims, won't be able to form "
"a matmul";
return false;
}
return true;
}
}
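// Folds dot(scalar-indexed-const lhs, constant rhs) into a gather over
// dot(lhs.source, rhs): the dot is evaluated against the gather source with
// the lhs contracting dimension remapped, and the original indices then
// select slices of the precomputed result.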
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
ConstantArray* rhs) {
VLOG(3) << "ComputeArrayForDotWithIndexedLhs(" << ToString(lhs) << " "
          << ToString(rhs) << ")";
if (!CanFoldDotIntoIndexedArray(
"ComputeArrayForDotWithIndexedLhs", lhs,
dim_numbers.lhs_contracting_dimensions(),
dim_numbers.lhs_batch_dimensions())) {
return nullptr;
}
int64_t lhs_rank = lhs->shape().rank();
DotDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.set_lhs_contracting_dimensions(
0, lhs->source_dim() == (lhs_rank - 1) ? (lhs_rank - 2) : (lhs_rank - 1));
TF_ASSIGN_OR_RETURN(
Literal * literal_for_new_source,
TakeOwnership(HloEvaluator{}.EvaluateDotOp(
new_dim_numbers, precision_config, lhs->literal(), *rhs->literal())));
int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
dim_numbers.rhs_batch_dimensions_size();
ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
return Construct<ScalarIndexedConstantArray>(
new_source, lhs->indices(), new_source_dim,
SpanToVector(lhs->output_dims()), shape);
}
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config, ConstantArray* lhs,
ScalarIndexedConstantArray* rhs) {
VLOG(3) << "ComputeArrayForDotWithIndexedRhs(" << ToString(lhs) << " "
          << ToString(rhs) << ")";
if (!CanFoldDotIntoIndexedArray(
"ComputeArrayForDotWithIndexedRhs", rhs,
dim_numbers.rhs_contracting_dimensions(),
dim_numbers.rhs_batch_dimensions())) {
return nullptr;
}
int64_t rhs_rank = rhs->shape().rank();
DotDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.set_rhs_contracting_dimensions(
0, rhs->source_dim() == (rhs_rank - 1) ? (rhs_rank - 2) : (rhs_rank - 1));
TF_ASSIGN_OR_RETURN(
Literal * literal_for_new_source,
TakeOwnership(HloEvaluator{}.EvaluateDotOp(
new_dim_numbers, precision_config, *lhs->literal(), rhs->literal())));
int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
dim_numbers.rhs_batch_dimensions_size() + 1;
ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
return Construct<ScalarIndexedConstantArray>(
new_source, rhs->indices(), new_source_dim,
SpanToVector(rhs->output_dims()), shape);
}
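// Dispatches dot folding based on which side is a scalar-indexed constant;
// only scalar-indexed-constant x constant combinations are handled.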
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForDot(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config, Array* lhs, Array* rhs) {
VLOG(3) << "ComputeArrayForDot(" << ToString(lhs) << " " << ToString(rhs);
if (auto* lhs_indexed_array =
dynamic_cast<ScalarIndexedConstantArray*>(lhs)) {
if (auto* rhs_constant = dynamic_cast<ConstantArray*>(rhs)) {
return ComputeArrayForDotWithIndexedLhs(shape, dim_numbers,
precision_config,
lhs_indexed_array, rhs_constant);
}
}
if (auto* rhs_indexed_array =
dynamic_cast<ScalarIndexedConstantArray*>(rhs)) {
if (auto* lhs_constant = dynamic_cast<ConstantArray*>(lhs)) {
return ComputeArrayForDotWithIndexedRhs(shape, dim_numbers,
precision_config, lhs_constant,
rhs_indexed_array);
}
}
return nullptr;
}
absl::StatusOr<bool> IndexedArrayAnalysisPrinterPass::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!VLOG_IS_ON(2)) {
return false;
}
IndexedArrayAnalysis analysis;
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instr : computation->instructions()) {
TF_ASSIGN_OR_RETURN(Analysis::Array * t, analysis.GetArrayFor(instr));
if (!dynamic_cast<UnknownArray*>(t) && !dynamic_cast<ConstantArray*>(t)) {
VLOG(2) << instr->ToString() << " -> " << analysis.ToString(t);
}
}
}
return false;
}
} | #include "xla/service/indexed_array_analysis.h"
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/ascii.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class IndexedArrayAnalysisTest : public HloTestBase {
protected:
void AssertArrayForRootExpressionIs(const std::string& hlo_text,
const std::string& root_expression) {
AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
false);
}
void AssertArrayWithConstantsForRootExpressionIs(
const std::string& hlo_text, const std::string& root_expression) {
AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
true);
}
private:
std::string CanonicalizeWhitespace(const std::string& text) {
std::string result;
for (char c : text) {
if (!absl::ascii_isspace(c)) {
result.push_back(c);
} else if (!result.empty() && result.back() != ' ') {
result.push_back(' ');
}
}
while (!result.empty() && result.back() == ' ') {
result.pop_back();
}
return result;
}
void AssertArrayForRootExpressionIsImpl(const std::string& hlo_text,
const std::string& root_expression,
bool print_constants) {
IndexedArrayAnalysis indexed_tensor_analysis;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IndexedArrayAnalysis::Array* const array_result,
indexed_tensor_analysis.GetArrayFor(
m->entry_computation()->root_instruction()));
std::string string_result = CanonicalizeWhitespace(
indexed_tensor_analysis.ToString(array_result, print_constants));
LOG(INFO) << string_result;
ASSERT_EQ(string_result, CanonicalizeWhitespace(root_expression));
}
};
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneGather) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneConstantGather) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices = s32[5] parameter(0)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(
hlo_text, "(scalar-indexed-const (constant s32[3,3]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed0) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices = s32[5,2] parameter(0)
ROOT gather = s32[5] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed1) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed2) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2,3] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={2,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed3) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,2}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices_a = s32[5] parameter(0)
indices_b = s32[2] parameter(1)
gather_a = s32[5,3] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT gather_b = s32[2,3] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3]) (scalar-indexed %indices_a "
"%indices_b 0->[0]) 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithOneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[2] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[5,3,2] gather(gather_a, indices_b),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 1->[1]) 1->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,6] parameter(0)
indices_a = s32[2] parameter(1)
indices_b = s32[5,7] parameter(2)
gather_a = s32[2,6] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT gather_b = s32[5,6,7] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 0->[0,1]) 0->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[4,8] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[4,5,3,8] gather(gather_a, indices_b),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=2,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed %operand (scalar-indexed %indices_a %indices_b "
"1->[0,2]) 1->[0,1,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5] parameter(0)
gather = s32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text, "(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,7] parameter(0)
gather = s32[5,4,7] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,2,6] constant({
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[5,7] parameter(0)
gather = s32[5,2,6,7] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,2,6}
ROOT reshape = s32[5,3,4,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3,4]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather3) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather4) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 1, 2, 3 } })
i.0 = s64[1,3]{1,0} parameter(0)
g.0 = s32[1,3,3]{2,1,0} gather(operand, i.0), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,3}
i.1 = s64[1] parameter(1)
g.1 = s32[1,1,3]{2,1,0} gather(g.0, i.1), offset_dims={0,2},
collapsed_slice_dims={1}, start_index_map={1},
index_vector_dim=1, slice_sizes={1,1,3}
ROOT reshape = s32[1,3]{1,0} reshape(g.1)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,3])
(reshape
(scalar-indexed %i.0 %i.1 1->[1])
to s64[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather5) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,6] constant({{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[1,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather6) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,2,6] constant({{
{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[1] parameter(0)
gather = s32[1,1,6] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={1,1,6}
ROOT reshape = s32[1,1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,1,6] s32[2,1,1,1,6] {
{ { { { 1, 2, 3, 4, 5, 6 } } } },
{ { { { 1, 2, 3, 4, 5, 6 } } } } })
(reshape %indices to s32[])
0->[])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather7) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1,5] parameter(0)
gather = s32[1,5,6] gather(operand, indices),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
ROOT reshape = s32[1,1,5,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6] s32[2,1,1,6] {
{ { { 1, 2, 3, 4, 5, 6 } } },
{ { { 1, 2, 3, 4, 5, 6 } } } })
(reshape %indices to s32[5])
0->[2])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,2,3] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4])
%indices
0->[0,2])
to s32[5,2,2,2,3])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,5,2] constant({
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}}})
indices = s32[7] parameter(0)
gather = s32[3,2,7] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1,2}
ROOT reshape = s32[6,7] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,5,2])
%indices
1->[2])
to s32[6,7])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4,1] constant({
{{1},{2},{3},{4}},
{{1},{2},{3},{4}},
{{1},{2},{3},{4}}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6,1] gather(operand, indices),
offset_dims={1,3},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4,1}
ROOT reshape = s32[5,2,2,2,3,1] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4,1])
%indices
0->[0,2])
to s32[5,2,2,2,3,1])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, UnaryOpOfGather) {
std::string hlo_text = R"(
HloModule UnaryOpOfGather
ENTRY main {
operand = f32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
indices = s32[5] parameter(0)
gather = f32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT tanh = f32[5,4] tanh(gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant f32[3,4] f32[3,4] {
{ 0.761594176, 0.964027584, 0.995054781, 0.999329329 },
{ 0.761594176, 0.995054781, 0.964027584, 0.999329329 },
{ 0.999329329, 0.995054781, 0.964027584, 0.761594176 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedScalarWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 6, 7, 8, 9 },
{ 6, 8, 7, 9 },
{ 9, 8, 7, 6 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsLhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ -4, -3, -2, -1 },
{ -4, -2, -3, -1 },
{ -1, -2, -3, -4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsRhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(constant_broadcasted, gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 4, 3, 2, 1 },
{ 4, 2, 3, 1 },
{ 1, 2, 3, 4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[4] constant({10,11,12,13})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={1}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 11, 13, 15, 17 },
{ 11, 14, 14, 17 },
{ 14, 14, 14, 14 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather_Negative) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[5] constant({10,11,12,13,14})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={0}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, RegularUnaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input = f32[100] parameter(0)
ROOT tanh = f32[100] tanh(input)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%tanh");
}
TEST_F(IndexedArrayAnalysisTest, RegularBinaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input0 = f32[100] parameter(0)
input1 = f32[100] parameter(1)
ROOT add = f32[100] add(input0, input1)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_lhs = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[3,3] s32[3,3] {
{ 70, 80, 90 },
{ 158, 184, 210 },
{ 246, 288, 330 } })
%indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[3,3] constant({{1,2,3},{4,5,6},{7,8,9}})
indices = s32[5] parameter(0)
dot_lhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,3] s32[4,3] {
{ 84, 99, 114 },
{ 96, 114, 132 },
{ 108, 129, 150 },
{ 120, 144, 168 } })
%indices 0->[1]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 38, 44, 50, 56 },
{ 83, 98, 113, 128 },
{ 128, 152, 176, 200 },
{ 173, 206, 239, 272 } })
%indices 1->[1])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[5,3] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 14, 32, 50, 68 },
{ 32, 77, 122, 167 },
{ 50, 122, 194, 266 },
{ 68, 167, 266, 365 } })
%indices 1->[0])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[2,3,2] constant({{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}})
dot_lhs_constant = s32[2,2,3] constant({{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}})
indices = s32[4] parameter(0)
dot_rhs = s32[2,3,4] gather(gather_operand, indices),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={2,3,1}
ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs),
lhs_contracting_dims={2}, rhs_contracting_dims={1},
lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[2,2,2] s32[2,2,2] {
{ { 22, 28 },
{ 49, 64 } },
{ { 220, 244 },
{ 301, 334 } } })
%indices 3->[2])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpNegative) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[2,3] constant({{1,2,3},{4,5,6}})
indices = s32[2] parameter(0)
dot_lhs = s32[3,2] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, "%dot");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0246551a-2064-4f3b-ad55-2a13cb8a1a45 | cpp | tensorflow/tensorflow | all_gather_broadcast_reorder | third_party/xla/xla/service/all_gather_broadcast_reorder.cc | third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc | #include "xla/service/all_gather_broadcast_reorder.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
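// Rewrites all-gather(broadcast(x)) so that the all-gather runs on the
// smaller pre-broadcast operand.  When the gather dimension is non-uniform
// (it comes from the broadcast operand), the all-gather is hoisted above the
// broadcast directly; when it is uniform (created by the broadcast), the
// operand is reshaped to expose a fresh leading dimension for the all-gather
// to expand, after which a broadcast and reshape restore the original shape.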
absl::StatusOr<bool> AllGatherBroadcastReorder::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedCollective(*module,
HloOpcode::kAllGather)) {
VLOG(1) << "Skip AllGatherBroadcastReorder because the module contains "
"all-gather with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kAllGather || !inst->shape().IsArray() ||
inst->operand(0)->opcode() != HloOpcode::kBroadcast) {
continue;
}
HloAllGatherInstruction *ag = Cast<HloAllGatherInstruction>(inst);
HloBroadcastInstruction *bcast =
Cast<HloBroadcastInstruction>(inst->mutable_operand(0));
absl::flat_hash_set<int64_t> non_uniform_dims;
non_uniform_dims.insert(bcast->dimensions().begin(),
bcast->dimensions().end());
const bool all_gather_along_uniform_dim =
non_uniform_dims.insert(ag->all_gather_dimension()).second;
int64_t uniform_dim_size = 1;
for (int64_t i = 0; i < ag->shape().rank(); ++i) {
if (non_uniform_dims.count(i) == 0) {
uniform_dim_size *= ag->shape().dimensions(i);
}
}
if (uniform_dim_size == 1) {
continue;
}
HloInstruction *replacement;
const int64_t ag_dim = ag->all_gather_dimension();
if (!all_gather_along_uniform_dim) {
VLOG(2) << "All-gather along non uniform dimension";
auto ag_dim_index = PositionInContainer(bcast->dimensions(), ag_dim);
Shape new_ag_shape = bcast->operand(0)->shape();
new_ag_shape.set_dimensions(ag_dim_index,
ag->shape().dimensions(ag_dim));
auto *new_ag =
Cast<HloAllGatherInstruction>(computation->AddInstruction(
ag->CloneWithNewOperands(new_ag_shape, bcast->operands())));
if (ag->channel_id()) {
new_ag->set_channel_id(next_channel_id++);
}
new_ag->set_all_gather_dimension(ag_dim_index);
replacement = computation->AddInstruction(
bcast->CloneWithNewOperands(ag->shape(), {new_ag}));
} else {
VLOG(2) << "All-gather along uniform dimension";
HloInstruction *x = bcast->mutable_operand(0);
std::vector<int64_t> shape_dims{1};
absl::Span<const int64_t> x_dims = x->shape().dimensions();
shape_dims.insert(shape_dims.end(), x_dims.begin(), x_dims.end());
Shape shape =
ShapeUtil::MakeShape(x->shape().element_type(), shape_dims);
HloInstruction *rs0 = computation->AddInstruction(
HloInstruction::CreateReshape(shape, x));
const int64_t ag_factor = ag->shape().dimensions(ag_dim) /
ag->operand(0)->shape().dimensions(ag_dim);
shape.set_dimensions(0, ag_factor);
auto *new_ag =
Cast<HloAllGatherInstruction>(computation->AddInstruction(
ag->CloneWithNewOperands(shape, {rs0})));
if (ag->channel_id()) {
new_ag->set_channel_id(next_channel_id++);
}
new_ag->set_all_gather_dimension(0);
std::vector<int64_t> bcast_shape_dims =
SpanToVector(ag->shape().dimensions());
bcast_shape_dims[ag_dim] = ag_factor;
bcast_shape_dims.insert(bcast_shape_dims.begin() + ag_dim + 1,
ag->shape().dimensions(ag_dim) / ag_factor);
Shape bcast_shape =
ShapeUtil::MakeShape(x->shape().element_type(), bcast_shape_dims);
std::vector<int64_t> bcast_dims;
bcast_dims.push_back(ag_dim);
for (int64_t d : bcast->dimensions()) {
bcast_dims.push_back(d + (d > ag_dim));
}
        HloInstruction *new_bcast = computation->AddInstruction(
            HloInstruction::CreateBroadcast(bcast_shape, new_ag, bcast_dims));
        replacement = computation->AddInstruction(
            HloInstruction::CreateReshape(ag->shape(), new_bcast));
}
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(replacement));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ag));
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_gather_broadcast_reorder.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
class AllGatherBroadcastReorderTest : public HloTestBase {
public:
enum class PassOutput { NoChange, NonUniformAGPattern, UniformAGPattern };
void RunPass(absl::string_view hlo_module, PassOutput expected_output) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_module));
auto changed = AllGatherBroadcastReorder().Run(module.get());
ASSERT_TRUE(changed.ok());
if (expected_output == PassOutput::NoChange) {
EXPECT_FALSE(changed.value());
} else {
EXPECT_TRUE(changed.value());
if (expected_output == PassOutput::NonUniformAGPattern) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::Broadcast(m::AllGather(m::Parameter())));
} else {
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Reshape(m::Broadcast(m::AllGather(m::Reshape(m::Parameter())))));
}
}
}
};
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongNonUniformDim) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128, 5] parameter(0)
bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0}
ROOT ag = f32[5, 4, 8, 256] all-gather(bc), dimensions={3}, replica_groups={{0, 1}}
}
)";
RunPass(hlo_string, PassOutput::NonUniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongUniformDim) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128, 5] parameter(0)
bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0}
ROOT ag = f32[5, 12, 8, 128] all-gather(bc), dimensions={1}, replica_groups={{0, 1, 2}}
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherBroadcastScalar) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[] parameter(0)
bc = f32[4, 8] broadcast(x), dimensions={}
ROOT ag = f32[12, 8] all-gather(bc), dimensions={0}, replica_groups={{0, 1, 2}}
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, T5Test) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[128] parameter(0)
bc = f32[1,4,84,128]{3,2,1,0} broadcast(x), dimensions={3}
ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(bc), channel_id=6,
replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassOutput::UniformAGPattern);
}
TEST_F(AllGatherBroadcastReorderTest, FailedMatch) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY main {
x = f32[1,4,84,128] parameter(0)
ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(x), channel_id=6,
replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassOutput::NoChange);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9aa7f320-8708-44ab-8106-6d86e1782605 | cpp | tensorflow/tensorflow | space_to_batch_converter | third_party/xla/xla/service/space_to_batch_converter.cc | third_party/xla/xla/service/space_to_batch_converter_test.cc | #include "xla/service/space_to_batch_converter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/core/bitmap.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace m = match;
constexpr int64_t kNumMappedDims = 3;
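// Helper visitor for the space-to-batch rewrite: splits chosen spatial
// dimensions of eligible convolutions into the batch dimension, propagates
// the space-to-batched shape through supported consumers, and inserts
// batch-to-space conversions where propagation stops.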
class ConvolutionVisitor {
public:
absl::Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);
struct ConvDetails {
std::vector<int64_t> spatial_dimensions_to_split;
int64_t inherent_low_padding, inherent_high_padding, stride, spatial_size,
base_dilation_factor, halo_size, high_padding_for_conv,
low_padding_for_conv, kernel_spatial_dim_size, input_dim_size;
};
ConvDetails GetConvolutionDetails(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
std::pair<std::vector<int64_t>, std::vector<int64_t>> GetSpatialDimsToSplit(
HloInstruction* old_operand);
bool IsForwardWindowDilatedConv(HloInstruction* convolution,
ConvolutionDimensionNumbers& dim_numbers);
bool CanPropagate(HloInstruction* consumer, HloInstruction* producer);
bool IsBroadcastTree(HloInstruction* op, HloInstruction* consumer,
std::vector<HloInstruction*>& instructions_to_transform);
void RewriteBroadcastTree(
HloInstruction* producer,
std::vector<HloInstruction*>& instructions_to_transform);
void PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer);
bool IsOpcodeNonPropagatable(HloInstruction* consumer);
bool SupportedOpForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool SupportedDotForPropagation(HloInstruction* consumer,
HloInstruction* producer);
bool IsBroadcastPropagatable(HloInstruction* broadcast,
HloInstruction* old_other_op);
absl::StatusOr<bool> Propagate(HloInstruction* consumer,
HloInstruction* producer);
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>> SplitSpace(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> PerformSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t spatial_split_size,
int64_t num_splits);
absl::StatusOr<HloInstruction*> TransposeAndMergeBatch(
HloInstruction* activations,
absl::Span<const int64_t> final_split_spatial_dim_positioning,
int64_t activations_batch_dim, int64_t old_batch_size);
absl::StatusOr<HloInstruction*> PadAndSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits);
absl::StatusOr<HloInstruction*> PropagateOnConstant(HloInstruction* consumer,
HloInstruction* producer);
absl::Status PropagateOnConv(HloInstruction* convolution);
absl::Status PropagateOnConcat(HloInstruction* concat);
absl::Status PropagateOnReverse(HloInstruction* reverse);
absl::Status PropagateOnPad(HloInstruction* pad);
absl::Status PropagateOnSlice(HloInstruction* slice);
absl::Status PropagateOnBackpropFilterConv(HloInstruction* convolution);
bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution);
bool IsThisBackPropFilterConv(HloInstruction* convolution);
absl::Status PropagateOnUsers(HloInstruction* old_conv);
absl::StatusOr<HloInstruction*> SelectValidPortion(
HloInstruction* new_instr, HloInstruction* old_instr,
HloInstruction* select_val, int64_t new_batch_dim,
absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
absl::Span<const int64_t> old_space_dims);
struct SpaceNextToBatchDetails {
HloInstruction* instr;
std::vector<int64_t> transpose_dims;
};
absl::StatusOr<SpaceNextToBatchDetails> BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split,
bool is_backprop = false, bool is_rhs = false);
absl::StatusOr<HloInstruction*> ChangeSpatialSizeOnSpaceToBatchedShape(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t new_spatial_dim_size, bool increase_spatial_size = false);
absl::StatusOr<HloInstruction*> SplitAndTransposeMergedBatch(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions);
absl::StatusOr<HloInstruction*> BatchToSpace(HloInstruction* old_instr);
absl::StatusOr<HloInstruction*> HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val = nullptr);
absl::StatusOr<bool> Run();
bool changed() const { return changed_; }
~ConvolutionVisitor() = default;
explicit ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation);
int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t end_point = convolution->convolution_dimension_numbers()
.input_spatial_dimensions_size() -
ctrl_.dimension_from_end_to_convert;
return end_point - dim_count + 1;
}
std::vector<int64_t> GetChosenSpatialDims(HloInstruction* convolution) {
const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
std::vector<int64_t> dims(dim_count);
for (int i = 0; i < dim_count; ++i) {
dims[i] =
convolution->convolution_dimension_numbers().input_spatial_dimensions(
first_dim + i);
}
return dims;
}
int64_t DimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return permute_dims[id];
}
int DimMapper(SpaceToBatchDimMap s) { return static_cast<int>(s); }
int64_t ReverseDimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) {
return std::distance(permute_dims.begin(), absl::c_find(permute_dims, id));
}
HloInstruction* DoesConvolutionFeedReduceWindowOrSelectAndScatter(
HloInstruction* instr, int64_t depth);
bool DoesConvolutionFeedUnpropagatableOp(
HloInstruction* instr, int64_t depth = kUnpropagatableOpSearchDepth);
bool IsSpaceToBatchedSpaceSizeSuitable(HloInstruction* instr);
private:
HloComputation* computation_;
absl::flat_hash_set<HloInstruction*> convs_to_visit_;
std::vector<HloInstruction*> conv_visitor_list_;
HloInstructionSet non_propagatable_instrs_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> batch_to_space_map_;
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_instrs_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> instr_to_dim_map_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>
instr_to_dim_permute_map_;
absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
broadcast_map_;
bool changed_ = false;
static constexpr int64_t kReduceWindowSearchDepth = 10;
static constexpr int64_t kUnpropagatableOpSearchDepth = 3;
static constexpr int64_t kMultiplierOnSpaceForBaseDilation = 3;
absl::flat_hash_map<std::pair<HloInstruction*, int64_t>, bool>
unpropagatability_cache_;
SpaceToBatchController ctrl_;
};
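// The constructor walks the computation in post-order and enqueues every
// convolution that is suitable for the space-to-batch rewrite.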
ConvolutionVisitor::ConvolutionVisitor(SpaceToBatchController ctrl,
HloComputation* computation) {
ctrl_ = ctrl;
computation_ = computation;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kConvolution) {
continue;
}
auto convolution = inst;
if (!IsConvSuitableForSpaceToBatch(convolution)) {
VLOG(1) << "Conv not suitable for space-to-batch "
<< convolution->ToString();
continue;
}
VLOG(1) << "Conv added to space-to-batch worklist "
<< convolution->ToString();
convs_to_visit_.insert(convolution);
conv_visitor_list_.push_back(convolution);
}
}
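// Returns the old and new spatial dimensions to split for an operand that has
// already been space-to-batched.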
std::pair<std::vector<int64_t>, std::vector<int64_t>>
ConvolutionVisitor::GetSpatialDimsToSplit(HloInstruction* old_operand) {
auto new_operand = old_to_new_instrs_[old_operand];
auto dim_map_val = instr_to_dim_map_[old_operand];
auto permute_dims = instr_to_dim_permute_map_[new_operand];
std::vector<int64_t> old_dims(ctrl_.count_of_dimensions_to_convert),
new_dims(ctrl_.count_of_dimensions_to_convert);
old_dims[0] = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
new_dims[0] = DimLookUp(permute_dims, old_dims[0]);
for (int i = 1; i < ctrl_.count_of_dimensions_to_convert; ++i) {
old_dims[i] = old_dims[0] + i;
new_dims[i] = new_dims[0] + i;
}
return std::make_pair(old_dims, new_dims);
}
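// A forward window-dilated conv is identified by its kernel spatial extent
// being smaller than the corresponding output spatial extent.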
bool ConvolutionVisitor::IsForwardWindowDilatedConv(
HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
const int64_t window_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
if (window_dilation_factor == 1) {
return false;
}
const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
return convolution->operand(1)->shape().dimensions(kernel_spatial_dim) <
convolution->shape().dimensions(output_spatial_dim);
}
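// Checks structural constraints (batch group count, window dilation, base
// dilation and padding, batch size limit, halo size) that make a convolution
// legal and profitable to space-to-batch.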
bool ConvolutionVisitor::IsConvSuitableForSpaceToBatch(
HloInstruction* convolution) {
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
if (GetFirstChosenSpatialDim(convolution) < 0) {
return false;
}
if (convolution->batch_group_count() != 1) {
return false;
}
if (convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation() != 1) {
if (!IsForwardWindowDilatedConv(convolution, dim_numbers)) {
return false;
}
}
const ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
const int64_t low_pad = convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
if (c.base_dilation_factor != 1) {
if (!ctrl_.enable_propagations_on_base_dilations) {
return false;
}
if (c.stride != 1) {
return false;
}
if (low_pad == 0) {
if (c.kernel_spatial_dim_size != 1) {
return false;
}
} else if (low_pad != c.base_dilation_factor - 1 &&
low_pad != c.base_dilation_factor) {
return false;
}
}
int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
const int64_t old_batch_size =
convolution->operand(0)->shape().dimensions(activations_batch_dim);
if (old_batch_size > ctrl_.limit_on_batch_size) {
return false;
}
VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size;
if (c.halo_size > CeilOfRatio(c.spatial_size, ctrl_.number_of_splits)) {
return false;
}
if (c.base_dilation_factor > 1 &&
c.inherent_low_padding == c.base_dilation_factor) {
if (c.spatial_size <
kMultiplierOnSpaceForBaseDilation * ctrl_.number_of_splits) {
return false;
}
}
VLOG(1) << "Legal space-to-batch convolution " << convolution->ToString();
return true;
}
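// Heuristically decides whether this convolution computes a backprop-filter
// gradient by comparing its feature dimensions against the recorded batch
// dimensions of its already-rewritten operands.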
bool ConvolutionVisitor::IsThisBackPropFilterConv(HloInstruction* convolution) {
auto activations = convolution->mutable_operand(0);
auto kernel = convolution->mutable_operand(1);
auto dim_numbers = convolution->convolution_dimension_numbers();
if (!old_to_new_instrs_.contains(kernel) &&
!old_to_new_instrs_.contains(activations)) {
return false;
}
if (old_to_new_instrs_.contains(kernel)) {
auto dim_map_val_op_0 = instr_to_dim_map_[kernel];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (convolution->convolution_dimension_numbers()
.kernel_input_feature_dimension() != old_batch_dim) {
return false;
}
}
if (old_to_new_instrs_.contains(activations)) {
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
if (dim_numbers.input_feature_dimension() != old_batch_dim) {
return false;
}
}
return true;
}
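// Duplicates the halo region of each split spatial dimension by slicing
// neighboring batch entries and concatenating them alongside the split space,
// so that windows crossing split boundaries see the correct data.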
absl::StatusOr<HloInstruction*> ConvolutionVisitor::HaloDuplicateWithSlice(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
HloInstruction* pad_val) {
const int64_t spatial_dim_count = spatial_dimensions_to_split.size();
const int64_t additional_batch_size =
IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
const int64_t original_batch_size =
activations->shape().dimensions(activations_batch_dim) /
additional_batch_size;
const int64_t spatial_split_size =
activations->shape().dimensions(spatial_dimensions_to_split[0]);
const int64_t batch_size = ctrl_.number_of_splits;
TF_ASSIGN_OR_RETURN(
activations, SplitAndTransposeMergedBatch(
activations, activations_batch_dim, original_batch_size,
spatial_dimensions_to_split));
const int64_t rank = activations->shape().rank();
VLOG(1) << "In HaloDuplicateWithSlice with activations "
<< activations->ToString() << " batch_size " << batch_size
<< " spatial_split_size " << spatial_split_size << " low_padding "
<< low_padding << " halo size " << halo_size;
CHECK_LE(std::abs(halo_size - low_padding), spatial_split_size);
for (int64_t i = 0; i < spatial_dimensions_to_split.size(); ++i) {
int64_t spatial_dimension_to_split = activations_batch_dim + 2 * (i + 1);
int64_t remapped_batch_dimension = spatial_dimension_to_split - 1;
HloInstruction* first_slice = nullptr;
std::vector<int64_t> strides(rank, 1);
HloInstruction* padding =
pad_val == nullptr
? activations->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(activations->shape().element_type())))
: pad_val;
if (low_padding > 0) {
std::vector<int64_t> start_indices(rank, 0),
end_indices(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices[spatial_dimension_to_split] =
spatial_split_size - low_padding;
end_indices[remapped_batch_dimension] = batch_size - 1;
end_indices[spatial_dimension_to_split] = spatial_split_size;
TF_ASSIGN_OR_RETURN(first_slice,
MakeSliceHlo(activations, start_indices, end_indices,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "first slice " << first_slice->ToString();
PaddingConfig padding_config =
MakeNoPaddingConfig(first_slice->shape().dimensions_size());
padding_config.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_low(1);
TF_ASSIGN_OR_RETURN(first_slice,
MakePadHlo(first_slice, padding, padding_config,
&first_slice->metadata(),
&first_slice->frontend_attributes()));
}
HloInstruction* halo_region = nullptr;
if (halo_size - low_padding > 0) {
std::vector<int64_t> start_indices_halo(rank, 0),
end_indices_halo(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
start_indices_halo[remapped_batch_dimension] = 1;
end_indices_halo[spatial_dimension_to_split] = halo_size - low_padding;
TF_ASSIGN_OR_RETURN(
halo_region,
MakeSliceHlo(activations, start_indices_halo, end_indices_halo,
strides, &activations->metadata(),
&activations->frontend_attributes()));
VLOG(1) << "halo_region " << halo_region->ToString();
PaddingConfig padding_config_halo =
MakeNoPaddingConfig(halo_region->shape().dimensions_size());
padding_config_halo.mutable_dimensions(remapped_batch_dimension)
->set_edge_padding_high(1);
TF_ASSIGN_OR_RETURN(halo_region,
MakePadHlo(halo_region, padding, padding_config_halo,
&halo_region->metadata(),
&halo_region->frontend_attributes()));
}
if ((halo_size == 0 && low_padding != 0) || low_padding < 0) {
std::vector<int64_t> start_indices_activations_cut(rank, 0),
end_indices_activations_cut(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
if (low_padding > 0) {
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size - low_padding;
} else {
start_indices_activations_cut[spatial_dimension_to_split] =
0 - low_padding;
end_indices_activations_cut[spatial_dimension_to_split] =
spatial_split_size;
}
TF_ASSIGN_OR_RETURN(
activations, MakeSliceHlo(activations, start_indices_activations_cut,
end_indices_activations_cut, strides,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (first_slice != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({first_slice, activations}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
if (halo_region != nullptr) {
TF_ASSIGN_OR_RETURN(
activations,
MakeConcatHlo({activations, halo_region}, spatial_dimension_to_split,
&activations->metadata(),
&activations->frontend_attributes()));
}
}
TF_ASSIGN_OR_RETURN(
    activations,
    TransposeAndMergeBatch(activations, spatial_dimensions_to_split,
                           activations_batch_dim, original_batch_size));
VLOG(1) << "HaloDuplicated activations " << activations->ToString();
return activations;
}
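// Transposes the activations (if needed) so that the split spatial dimensions
// become contiguous and immediately follow the batch dimension, updating the
// convolution dimension numbers accordingly.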
absl::StatusOr<ConvolutionVisitor::SpaceNextToBatchDetails>
ConvolutionVisitor::BringSpaceNextToBatch(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim,
std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
bool is_rhs) {
for (int64_t i = 1; i < spatial_dimensions_to_split->size(); ++i) {
CHECK_EQ(spatial_dimensions_to_split->at(i),
spatial_dimensions_to_split->at(i - 1) + 1)
<< "Spatial dimensions are not contiguous";
}
int64_t spatial_dimension_to_split = spatial_dimensions_to_split->at(0);
std::vector<int64_t> transpose_dims(activations->shape().rank());
if (spatial_dimension_to_split == activations_batch_dim + 1) {
absl::c_iota(transpose_dims, 0);
} else {
ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
int64_t pushed_counter = 0;
int64_t new_batch_dim, new_spatial_dim;
int64_t dim_counter = 0;
if (is_rhs) {
CHECK(is_backprop);
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (i == dim_numbers.kernel_output_feature_dimension()) {
new_dim_numbers.set_kernel_output_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.kernel_spatial_dimensions(), i);
if (it != dim_numbers.kernel_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.kernel_spatial_dimensions().begin();
new_dim_numbers.set_kernel_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
new_dim_numbers.set_kernel_input_feature_dimension(activations_batch_dim);
} else {
for (int i = 0; i < activations->shape().rank(); ++i) {
if (i == activations_batch_dim) {
continue;
}
if (i == spatial_dimension_to_split) {
transpose_dims[dim_counter++] = activations_batch_dim;
new_batch_dim = pushed_counter;
pushed_counter++;
new_spatial_dim = pushed_counter;
}
if (is_backprop && i == dim_numbers.input_batch_dimension()) {
new_dim_numbers.set_input_batch_dimension(pushed_counter);
} else if (i == dim_numbers.input_feature_dimension()) {
new_dim_numbers.set_input_feature_dimension(pushed_counter);
} else {
auto it = absl::c_find(dim_numbers.input_spatial_dimensions(), i);
if (it != dim_numbers.input_spatial_dimensions().end()) {
int64_t j = it - dim_numbers.input_spatial_dimensions().begin();
new_dim_numbers.set_input_spatial_dimensions(j, pushed_counter);
}
}
transpose_dims[dim_counter++] = i;
pushed_counter++;
}
activations_batch_dim = new_batch_dim;
spatial_dimension_to_split = new_spatial_dim;
TF_ASSIGN_OR_RETURN(activations,
MakeTransposeHlo(activations, transpose_dims));
if (is_backprop) {
new_dim_numbers.set_input_feature_dimension(activations_batch_dim);
} else {
new_dim_numbers.set_input_batch_dimension(activations_batch_dim);
}
}
dim_numbers = new_dim_numbers;
}
for (int64_t i = 0; i < spatial_dimensions_to_split->size(); ++i) {
(*spatial_dimensions_to_split)[i] = spatial_dimension_to_split + i;
}
return SpaceNextToBatchDetails{activations, transpose_dims};
}
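// Reshapes the merged batch dimension back into [old_batch, split factors...]
// and, when multiple dimensions were split, transposes so each split factor
// sits immediately before its spatial dimension: [B, b0, S0, b1, S1, ...].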
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::SplitAndTransposeMergedBatch(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions) {
CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t new_batch_size =
activations->shape().dimensions(batch_dimension);
VLOG(3) << "Decreasing the spatial size while propagating new_batch_size "
<< new_batch_size << " old_batch_size " << old_batch_size;
new_dimensions[batch_dimension] = old_batch_size;
const int64_t spatial_dim_count = spatial_dimensions.size();
for (int64_t i = 0; i < spatial_dim_count; ++i) {
new_dimensions.insert(new_dimensions.begin() + spatial_dimensions[0],
ctrl_.number_of_splits);
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_split_activations,
MakeReshapeHlo(new_dimensions, activations));
if (spatial_dim_count > 1) {
std::vector<int64_t> transpose_dims(new_dimensions.size());
absl::c_iota(transpose_dims, 0);
int64_t start_batch_dim_position = batch_dimension + 1;
int64_t start_space_dim_position = batch_dimension + 2;
for (int i = 0; i < spatial_dim_count; ++i) {
transpose_dims[start_batch_dim_position + 2 * i] =
batch_dimension + spatial_dim_count - i;
transpose_dims[start_space_dim_position + 2 * i] =
batch_dimension + spatial_dim_count + 1 + i;
}
TF_ASSIGN_OR_RETURN(
batch_split_activations,
MakeTransposeHlo(batch_split_activations, transpose_dims));
}
return batch_split_activations;
}
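// Changes the per-split spatial size of an already space-to-batched shape:
// collapses batch and space back together, pads (to grow) or slices (to
// shrink) the combined space, then re-splits it.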
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::ChangeSpatialSizeOnSpaceToBatchedShape(
HloInstruction* activations, int64_t batch_dimension,
int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions,
int64_t new_spatial_dim_size, bool increase_spatial_size) {
CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t spatial_dim_count = spatial_dimensions.size();
const int64_t spatial_dim_size =
activations->shape().dimensions(spatial_dimensions[0]);
const int64_t reshaped_space_size = spatial_dim_size * ctrl_.number_of_splits;
TF_ASSIGN_OR_RETURN(
HloInstruction * batch_split_activations,
SplitAndTransposeMergedBatch(activations, batch_dimension, old_batch_size,
spatial_dimensions));
std::vector<int64_t> batch_space_collapse_reshape_dims(
batch_split_activations->shape().dimensions().begin(),
batch_split_activations->shape().dimensions().end());
batch_space_collapse_reshape_dims.erase(
batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0],
batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0] +
spatial_dim_count);
for (auto spatial_dimension : spatial_dimensions) {
batch_space_collapse_reshape_dims[spatial_dimension] = reshaped_space_size;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_space_collapsed_reshape,
MakeReshapeHlo(batch_space_collapse_reshape_dims,
batch_split_activations));
VLOG(3) << "First reshape done";
const int64_t rank = activations->shape().rank();
if (increase_spatial_size) {
PaddingConfig padding_config = MakeNoPaddingConfig(
batch_space_collapsed_reshape->shape().dimensions_size());
for (auto spatial_dimension : spatial_dimensions) {
padding_config.mutable_dimensions(spatial_dimension)
->set_edge_padding_high(new_spatial_dim_size *
ctrl_.number_of_splits -
reshaped_space_size);
padding_config.mutable_dimensions(spatial_dimension)
->set_edge_padding_low(0);
}
HloInstruction* padding = activations->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(
batch_space_collapsed_reshape->shape().element_type())));
TF_ASSIGN_OR_RETURN(
batch_space_collapsed_reshape,
MakePadHlo(batch_space_collapsed_reshape, padding, padding_config,
&batch_space_collapsed_reshape->metadata(),
&batch_space_collapsed_reshape->frontend_attributes()));
} else {
std::vector<int64_t> start_indices(rank, 0),
end_indices(batch_space_collapsed_reshape->shape().dimensions().begin(),
batch_space_collapsed_reshape->shape().dimensions().end()),
strides(rank, 1);
for (auto spatial_dimension : spatial_dimensions) {
end_indices[spatial_dimension] =
new_spatial_dim_size * ctrl_.number_of_splits;
}
TF_ASSIGN_OR_RETURN(
batch_space_collapsed_reshape,
MakeSliceHlo(batch_space_collapsed_reshape, start_indices, end_indices,
strides, &batch_space_collapsed_reshape->metadata(),
&batch_space_collapsed_reshape->frontend_attributes()));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_new,
PerformSplitSpace(batch_space_collapsed_reshape, spatial_dimensions,
batch_dimension, new_spatial_dim_size,
ctrl_.number_of_splits));
VLOG(3) << "Size decreased activations " << activations_new->ToString();
return activations_new;
}
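// Main driver: space-to-batches all queued convolutions, then either finishes
// propagation for the remaining instructions or inserts batch-to-space
// conversions for operands that could not be propagated through.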
absl::StatusOr<bool> ConvolutionVisitor::Run() {
for (auto conv : conv_visitor_list_) {
if (ctrl_.disable_starting_on_small_chains &&
DoesConvolutionFeedUnpropagatableOp(conv)) {
VLOG(1) << "Giving up on conv " << conv->ToString()
<< " because it feeds an unpropagatable op";
convs_to_visit_.erase(conv);
}
if (convs_to_visit_.count(conv) > 0) {
TF_CHECK_OK(PerformSpaceToBatchOnConvolution(conv));
changed_ = true;
}
}
conv_visitor_list_.clear();
convs_to_visit_.clear();
for (auto instr : non_propagatable_instrs_) {
if (instr->opcode() == HloOpcode::kConvolution) {
VLOG(1) << "Instr " << instr->ToString();
}
if (instr->opcode() == HloOpcode::kConvolution &&
!IsConvSuitableForSpaceToBatch(instr)) {
HloInstruction* producer = nullptr;
if (old_to_new_instrs_.contains(instr->mutable_operand(0))) {
producer = instr->mutable_operand(0);
} else if (old_to_new_instrs_.contains(instr->mutable_operand(1))) {
producer = instr->mutable_operand(1);
}
if (producer) {
if (CanPropagate(instr, producer)) {
bool needs_further_propagation;
TF_ASSIGN_OR_RETURN(needs_further_propagation,
Propagate(instr, producer));
TF_CHECK_OK(computation_->ReplaceInstruction(
instr, old_to_new_instrs_[instr]));
continue;
}
}
}
VLOG(1) << "Could not eventually propagate through " << instr->ToString();
absl::flat_hash_map<int64_t, HloInstruction*> operand_map;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (old_to_new_instrs_.count(instr->mutable_operand(i))) {
TF_ASSIGN_OR_RETURN(operand_map[i],
BatchToSpace(instr->mutable_operand(i)));
}
}
for (auto entry : operand_map) {
TF_CHECK_OK(instr->ReplaceOperandWith(entry.first, entry.second));
}
}
non_propagatable_instrs_.clear();
return changed_;
}
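// Elementwise ops are propagatable only if they carry no special semantics;
// fusion, rng, copy, constant, iota and map are excluded.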
bool IsTrivialElementwise(HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kFusion || hlo->opcode() == HloOpcode::kRng ||
hlo->opcode() == HloOpcode::kCopy ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kIota || hlo->opcode() == HloOpcode::kMap) {
return false;
}
return hlo->IsElementwise();
}
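// Determines whether the space-to-batched shape of `producer` can be safely
// propagated onto `consumer`, checking per-opcode shape and dimension-order
// compatibility.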
bool ConvolutionVisitor::CanPropagate(HloInstruction* consumer,
HloInstruction* producer) {
if (IsTrivialElementwise(consumer)) {
VLOG(2) << "Doing propagation check on elementwise op: "
<< consumer->ToString();
HloInstruction* pivot_operand = nullptr;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
std::vector<HloInstruction*> to_transform;
const bool broadcast_or_constant =
(old_producer->opcode() == HloOpcode::kConstant) ||
(old_producer->opcode() == HloOpcode::kBroadcast &&
IsBroadcastPropagatable(old_producer, producer)) ||
(consumer->IsElementwiseBinary() &&
old_producer->opcode() == HloOpcode::kBroadcast &&
IsBroadcastTree(old_producer, producer, to_transform));
if (!old_to_new_instrs_.contains(old_producer) &&
!broadcast_or_constant) {
VLOG(1) << "Cannot propagate on elementwise op " << consumer->ToString()
<< " because operand " << old_producer->ToString()
<< " isn't ready ";
return false;
} else {
if (broadcast_or_constant) {
VLOG(2) << "Skipping on " << old_producer->ToString();
continue;
}
CHECK(old_to_new_instrs_.contains(old_producer));
CHECK(instr_to_dim_map_.contains(old_producer));
if (pivot_operand == nullptr) {
pivot_operand = old_producer;
VLOG(2) << "Elementwise op: pivot " << old_producer->ToString();
} else {
if (instr_to_dim_map_[pivot_operand]
[DimMapper(SpaceToBatchDimMap::kBatch)] !=
instr_to_dim_map_[old_producer]
[DimMapper(SpaceToBatchDimMap::kBatch)] ||
instr_to_dim_map_[pivot_operand]
[DimMapper(SpaceToBatchDimMap::kSpace0)] !=
instr_to_dim_map_[old_producer]
[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed batch space ordering ";
return false;
}
auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
auto new_instr = old_to_new_instrs_[old_producer];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
for (int j = 0; j < pivot_permute_dims.size(); ++j) {
if (pivot_permute_dims[j] != permute_dims[j]) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to permuted dimensions ";
return false;
}
if (pivot_new_instr->shape().dimensions(j) !=
new_instr->shape().dimensions(j)) {
if (!((consumer->IsElementwiseBinary() ||
consumer->opcode() == HloOpcode::kSelect) &&
j == instr_to_dim_map_[pivot_operand][DimMapper(
SpaceToBatchDimMap::kSpace0)])) {
VLOG(2) << "Elementwise op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed shape sizes ";
return false;
}
}
}
}
}
}
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (!instr_to_dim_map_.contains(consumer->mutable_operand(i))) {
return false;
}
}
auto pivot_operand = consumer->mutable_operand(0);
auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
for (int64_t i = 1; i < consumer->operand_count(); ++i) {
auto new_instr = old_to_new_instrs_[consumer->mutable_operand(i)];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
for (int j = 0; j < pivot_permute_dims.size(); ++j) {
if (pivot_permute_dims[j] != permute_dims[j]) {
VLOG(2) << "Concat op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to permuted dimensions ";
return false;
}
if (pivot_new_instr->shape().dimensions(j) !=
new_instr->shape().dimensions(j)) {
VLOG(2) << "Concat op: checking for shape equivalence "
<< consumer->ToString()
<< " failed due to changed shape sizes ";
return false;
}
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
if (!ConsumeFuel("space-to-batch-converter", [&] {
return "Skipping space-to-batch propagation because fuel over\n";
})) {
return false;
}
auto are_conv_dims_compatible =
[&](const ConvolutionDimensionNumbers dim_numbers,
std::vector<int64_t>& dim_map, bool check_lhs) {
if (check_lhs) {
if (dim_numbers.input_spatial_dimensions(
GetFirstChosenSpatialDim(consumer)) !=
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
return false;
}
for (int i = 0; i < dim_numbers.input_spatial_dimensions().size();
++i) {
if (dim_numbers.input_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
dim_numbers.input_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
return false;
}
}
} else {
if (dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(consumer)) !=
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
return false;
}
for (int i = 0; i < dim_numbers.kernel_spatial_dimensions().size();
++i) {
if (dim_numbers.kernel_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
dim_numbers.kernel_spatial_dimensions(i) ==
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
return false;
}
}
}
return true;
};
VLOG(1) << "Checking if conv is supported for propagation "
<< consumer->ToString();
bool found_good_non_window_dilated_conv = true;
if (IsConvSuitableForSpaceToBatch(consumer)) {
if (!old_to_new_instrs_.contains(consumer->mutable_operand(0))) {
found_good_non_window_dilated_conv = false;
}
ConvolutionDimensionNumbers dim_numbers =
consumer->convolution_dimension_numbers();
ConvDetails c = GetConvolutionDetails(consumer, dim_numbers);
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> new_spatial_dims = retval.second;
auto new_activations = old_to_new_instrs_[consumer->mutable_operand(0)];
if (new_activations->shape().dimensions(retval.second[0]) <
c.inherent_low_padding) {
return false;
}
auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
found_good_non_window_dilated_conv = false;
}
if (consumer->convolution_dimension_numbers().input_batch_dimension() !=
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]) {
found_good_non_window_dilated_conv = false;
}
if (found_good_non_window_dilated_conv) {
return true;
}
}
if (!ctrl_.enable_propagations_on_window_dilations) {
return false;
}
if (!IsThisBackPropFilterConv(consumer)) {
return false;
}
if (GetFirstChosenSpatialDim(consumer) < 0) {
return false;
}
if (consumer->window()
.dimensions(GetFirstChosenSpatialDim(consumer))
.stride() != 1) {
return false;
}
if (consumer->feature_group_count() != 1) {
return false;
}
VLOG(2) << "Checking for backprop filter conv propagatability";
CHECK_EQ(consumer->operand_count(), 2);
auto activations = consumer->mutable_operand(0);
auto kernel = consumer->mutable_operand(1);
auto win_dims =
consumer->window().dimensions(GetFirstChosenSpatialDim(consumer));
const int64_t rhs_dilation = win_dims.window_dilation();
const int64_t lhs_dilation = win_dims.base_dilation();
if (lhs_dilation != 1) {
return false;
}
if (rhs_dilation == 1 &&
!ctrl_.enable_propagations_on_trivial_window_dilations) {
if (!old_to_new_instrs_.contains(kernel) ||
!old_to_new_instrs_.contains(activations)) {
return false;
}
}
if (!old_to_new_instrs_.contains(kernel)) {
const int64_t rhs_batch =
kernel->shape().dimensions(consumer->convolution_dimension_numbers()
.kernel_input_feature_dimension());
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto first_operand = old_to_new_instrs_[activations];
auto permute_dims_first_operand =
instr_to_dim_permute_map_[first_operand];
const int64_t new_batch_dim =
DimLookUp(permute_dims_first_operand, old_batch_dim);
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
const int64_t lhs_batch =
first_operand->shape().dimensions(new_batch_dim);
if (first_operand->shape().dimensions(new_space_dim) % rhs_dilation !=
0) {
return false;
}
if (rhs_batch * ctrl_.number_of_splits != lhs_batch) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
return false;
}
VLOG(2)
<< "Backprop filter conv ready for propagation: activations ready, "
" kernel will be space-to-batched";
return true;
}
if (!old_to_new_instrs_.contains(activations)) {
const int64_t lhs_batch = activations->shape().dimensions(
consumer->convolution_dimension_numbers().input_feature_dimension());
auto dim_map_val_op_1 = instr_to_dim_map_[consumer->mutable_operand(1)];
const int64_t old_batch_dim =
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)];
auto second_operand = old_to_new_instrs_[kernel];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
const int64_t new_batch_dim =
DimLookUp(permute_dims_second_operand, old_batch_dim);
const int64_t rhs_batch =
second_operand->shape().dimensions(new_batch_dim);
if (rhs_batch != ctrl_.number_of_splits * lhs_batch) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_1, false)) {
return false;
}
VLOG(2) << "Backprop filter conv ready for propagation: kernel ready, "
" activations will be space-to-batched";
return true;
}
auto first_operand = old_to_new_instrs_[activations];
auto dim_map_val_op_0 = instr_to_dim_map_[activations];
auto second_operand = old_to_new_instrs_[kernel];
auto dim_map_val_op_1 = instr_to_dim_map_[kernel];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
const int64_t new_batch_dim_operand_0 =
DimLookUp(permute_dims_first_operand,
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]);
const int64_t new_space_dim_operand_0 =
DimLookUp(permute_dims_first_operand,
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]);
const int64_t new_batch_dim_operand_1 =
DimLookUp(permute_dims_second_operand,
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)]);
const int64_t new_space_dim_operand_1 =
DimLookUp(permute_dims_second_operand,
dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kSpace0)]);
if (first_operand->shape().dimensions(new_batch_dim_operand_0) !=
second_operand->shape().dimensions(new_batch_dim_operand_1)) {
VLOG(2) << "Backprop filter conv not ready for propagation because batch "
"dimensions don't line up";
return false;
}
if (first_operand->shape().dimensions(new_space_dim_operand_0) >
rhs_dilation *
second_operand->shape().dimensions(new_space_dim_operand_1)) {
VLOG(2) << "Backprop filter conv not ready for propagation because of "
"dilation factor mismatch";
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_0, true)) {
return false;
}
if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
dim_map_val_op_1, false)) {
return false;
}
VLOG(2) << "Backprop filter conv ready for propagation";
return true;
}
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kReduce) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
if (i == 0 && !old_to_new_instrs_.contains(old_producer)) {
return false;
}
}
if (consumer->opcode() == HloOpcode::kReduceWindow) {
return IsSpaceToBatchedSpaceSizeSuitable(consumer);
}
}
if (consumer->opcode() == HloOpcode::kSelectAndScatter) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
auto old_producer = consumer->mutable_operand(i);
if (i < 2 && !old_to_new_instrs_.contains(old_producer)) {
return false;
}
}
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
auto permute_dims_second_operand =
instr_to_dim_permute_map_[second_operand];
if (permute_dims_first_operand != permute_dims_second_operand) {
VLOG(2) << "Can't propagate through select and scatter due to "
"permutation mismatch";
return false;
}
const int64_t old_batch_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t new_batch_dim =
DimLookUp(permute_dims_first_operand, old_batch_dim);
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
if (first_operand->shape().dimensions(new_batch_dim) !=
second_operand->shape().dimensions(new_batch_dim)) {
VLOG(2)
<< "Can't propagate through select and scatter due to dim mismatch";
return false;
}
const int64_t stride =
consumer->window().dimensions(old_space_dim).stride();
const int64_t pad_high =
consumer->window().dimensions(old_space_dim).padding_high();
const int64_t pad_low =
consumer->window().dimensions(old_space_dim).padding_low();
if ((first_operand->shape().dimensions(new_space_dim) + pad_high +
pad_low) /
stride !=
second_operand->shape().dimensions(new_space_dim)) {
VLOG(2) << "Can't propagate through select and scatter due to stride "
"mismatch";
return false;
}
return IsSpaceToBatchedSpaceSizeSuitable(consumer);
}
return true;
}
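// Creates (and caches) a broadcast compatible with the space-to-batched shape
// of `producer`, adding a reshape when the batch dimension itself is
// broadcast.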
void ConvolutionVisitor::PropagateOnBroadcast(HloInstruction* consumer,
HloInstruction* producer) {
auto new_producer = old_to_new_instrs_[producer];
auto permute_dims = instr_to_dim_permute_map_[new_producer];
auto dim_map_val = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto orig_broadcast_dims = consumer->dimensions();
bool batch_is_broadcasted =
absl::c_linear_search(orig_broadcast_dims, old_batch_dim);
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);
bool map_found = broadcast_map_.contains(consumer);
if (map_found) {
for (auto previous_broadcast : broadcast_map_[consumer]) {
if (ShapeUtil::CompatibleIgnoringElementType(previous_broadcast->shape(),
new_producer->shape())) {
return;
}
}
}
std::vector<int64_t> final_shape_dims(
new_producer->shape().dimensions().begin(),
new_producer->shape().dimensions().end());
if (batch_is_broadcasted) {
final_shape_dims[new_batch_dim] =
producer->shape().dimensions(old_batch_dim);
final_shape_dims[new_space_dim] *= ctrl_.number_of_splits;
}
std::vector<int64_t> broadcast_dims;
const auto& dimensions = consumer->dimensions();
broadcast_dims.reserve(dimensions.size());
for (auto j : dimensions) {
broadcast_dims.push_back(DimLookUp(permute_dims, j));
}
auto new_broadcast = MakeBroadcastHlo(
consumer->mutable_operand(0), broadcast_dims, final_shape_dims,
&consumer->metadata(), &consumer->frontend_attributes());
VLOG(1) << "Created broadcast " << new_broadcast->ToString();
if (batch_is_broadcasted) {
new_broadcast =
MakeReshapeHlo(new_producer->shape().dimensions(), new_broadcast)
.value();
VLOG(2) << "Created reshape of broadcast " << new_broadcast->ToString();
}
if (!map_found) {
absl::flat_hash_set<HloInstruction*> set_of_broadcasts;
broadcast_map_[consumer] = set_of_broadcasts;
}
broadcast_map_[consumer].insert(new_broadcast);
}
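// Rewrites every instruction in a previously validated broadcast tree so that
// it matches the space-to-batched shape of `producer`.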
void ConvolutionVisitor::RewriteBroadcastTree(
HloInstruction* producer,
std::vector<HloInstruction*>& instructions_to_transform) {
CHECK(old_to_new_instrs_.contains(producer));
for (auto instr : instructions_to_transform) {
if (instr->opcode() == HloOpcode::kBroadcast) {
PropagateOnBroadcast(instr, producer);
} else if (IsTrivialElementwise(instr)) {
Propagate(instr, instr->mutable_operand(0)).value();
} else {
LOG(FATAL) << "Unsupported opcode in RewriteBroadcastTree";
}
}
}
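// Returns true if `op` is a tree of trivial elementwise ops, scalar constants
// and propagatable broadcasts, collecting the nodes that need rewriting.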
bool ConvolutionVisitor::IsBroadcastTree(
HloInstruction* op, HloInstruction* consumer,
std::vector<HloInstruction*>& instructions_to_transform) {
if (op->opcode() == HloOpcode::kBroadcast) {
if (IsBroadcastPropagatable(op, consumer)) {
instructions_to_transform.push_back(op);
return true;
} else {
return false;
}
}
if (Match(op, m::ConstantScalar())) {
return true;
}
if (!IsTrivialElementwise(op)) {
return false;
}
for (int64_t i = 0; i < op->operand_count(); ++i) {
if (!IsBroadcastTree(op->mutable_operand(i), consumer,
instructions_to_transform)) {
return false;
}
}
instructions_to_transform.push_back(op);
return true;
}
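// A broadcast is propagatable when it does not broadcast into the split
// spatial dimension of the other operand.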
bool ConvolutionVisitor::IsBroadcastPropagatable(HloInstruction* broadcast,
HloInstruction* old_other_op) {
CHECK_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
CHECK(instr_to_dim_map_.contains(old_other_op));
auto result = instr_to_dim_map_[old_other_op];
const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto broadcast_dims = broadcast->dimensions();
return !absl::c_linear_search(broadcast_dims, space_dim);
}
bool ConvolutionVisitor::IsOpcodeNonPropagatable(HloInstruction* consumer) {
switch (consumer->opcode()) {
case HloOpcode::kCustomCall:
return true;
default:
return false;
}
}
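// A dot can absorb propagation only when the space-to-batched operand is its
// LHS, the batch/space dims are neither contracted nor dot-batch dims, and
// the old feature dim participates as a dot batch dimension.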
bool ConvolutionVisitor::SupportedDotForPropagation(HloInstruction* consumer,
HloInstruction* producer) {
if (consumer->opcode() != HloOpcode::kDot) {
return false;
}
auto operand = consumer->mutable_operand(0);
if (operand != producer || !instr_to_dim_map_.contains(operand)) {
return false;
}
const auto& dnums = consumer->dot_dimension_numbers();
const auto& contracting_dims = dnums.lhs_contracting_dimensions();
const auto& batch_dims = dnums.lhs_batch_dimensions();
auto result = instr_to_dim_map_[operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_feature_dim =
result[DimMapper(SpaceToBatchDimMap::kFeature)];
if (consumer->operand(1)->shape().rank() ==
batch_dims.size() + contracting_dims.size()) {
return false;
}
bool found = false;
for (auto dim : batch_dims) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
if (dim == old_feature_dim) {
found = true;
}
}
if (!found) {
return false;
}
for (auto dim : contracting_dims) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
}
return true;
}
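// Per-opcode gate deciding whether propagation through `consumer` is
// implemented at all; detailed legality is rechecked in CanPropagate.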
bool ConvolutionVisitor::SupportedOpForPropagation(HloInstruction* consumer,
HloInstruction* producer) {
if (IsOpcodeNonPropagatable(consumer)) {
return false;
}
if (IsTrivialElementwise(consumer)) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
if (!IsBroadcastPropagatable(consumer->mutable_operand(i), producer)) {
VLOG(2) << "Could not propagate through broadcast";
return false;
}
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
return true;
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
HloInstruction* pivot_operand = nullptr;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (instr_to_dim_map_.contains(consumer->mutable_operand(i))) {
pivot_operand = consumer->mutable_operand(i);
break;
}
}
if (pivot_operand == nullptr) {
VLOG(1) << "Concat: Dim map not found on any operand";
return false;
}
auto result = instr_to_dim_map_[pivot_operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (consumer->concatenate_dimension() == old_batch_dim ||
consumer->concatenate_dimension() == old_space_dim) {
return false;
}
return true;
}
if (consumer->opcode() == HloOpcode::kReverse) {
auto operand_0 = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand_0)) {
return false;
}
auto result = instr_to_dim_map_[operand_0];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
for (auto dim : consumer->dimensions()) {
if (dim == old_batch_dim || dim == old_space_dim) {
return false;
}
}
return true;
}
if (consumer->opcode() == HloOpcode::kTranspose) {
return true;
}
if (consumer->opcode() == HloOpcode::kPad) {
auto operand_0 = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand_0)) {
return false;
}
auto result = instr_to_dim_map_[operand_0];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto does_dim_have_padding = [](PaddingConfig padding_config, int64_t dim) {
return padding_config.dimensions(dim).edge_padding_low() != 0 ||
padding_config.dimensions(dim).edge_padding_high() != 0 ||
padding_config.dimensions(dim).interior_padding() != 0;
};
if (does_dim_have_padding(consumer->padding_config(), old_batch_dim) ||
does_dim_have_padding(consumer->padding_config(), old_space_dim)) {
return false;
}
return true;
}
if (consumer->opcode() == HloOpcode::kSlice) {
auto operand = consumer->mutable_operand(0);
if (!instr_to_dim_map_.contains(operand)) {
return false;
}
auto result = instr_to_dim_map_[operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (consumer->shape().dimensions(old_batch_dim) !=
operand->shape().dimensions(old_batch_dim)) {
return false;
}
if (consumer->shape().dimensions(old_space_dim) !=
operand->shape().dimensions(old_space_dim)) {
return false;
}
return true;
}
if (SupportedDotForPropagation(consumer, producer)) {
return true;
}
if (consumer->opcode() == HloOpcode::kReduce) {
if (consumer->shape().IsTuple()) {
return false;
}
auto reduce_dims = consumer->dimensions();
auto result = instr_to_dim_map_[consumer->mutable_operand(0)];
const int64_t batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (!absl::c_linear_search(reduce_dims, batch_dim) &&
!absl::c_linear_search(reduce_dims, space_dim)) {
return true;
}
return absl::c_linear_search(reduce_dims, batch_dim) &&
absl::c_linear_search(reduce_dims, space_dim);
}
if (consumer->opcode() == HloOpcode::kReduceWindow &&
consumer->shape().IsTuple()) {
return false;
}
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kSelectAndScatter) {
auto first_operand = consumer->mutable_operand(0);
auto window = consumer->window();
if (instr_to_dim_map_.count(first_operand) <= 0) {
VLOG(1) << "Dim map not found on windowed operand. Window dim count "
<< window.dimensions().size();
return false;
}
auto result = instr_to_dim_map_[first_operand];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
if (window.dimensions(old_batch_dim).size() != 1) {
return false;
}
if (window.dimensions(old_space_dim).padding_low() != 0) {
return false;
}
if (window.dimensions(old_space_dim).base_dilation() != 1 ||
window.dimensions(old_space_dim).window_dilation() != 1) {
return false;
}
if (window.dimensions(old_batch_dim).base_dilation() != 1 ||
window.dimensions(old_batch_dim).window_dilation() != 1) {
return false;
}
if (window.dimensions(old_space_dim).padding_high() >
window.dimensions(old_space_dim).size()) {
return false;
}
if (old_to_new_instrs_.count(first_operand) <= 0) {
return false;
}
auto new_operand = old_to_new_instrs_[first_operand];
auto permute_dims = instr_to_dim_permute_map_[new_operand];
if (consumer->opcode() == HloOpcode::kSelectAndScatter) {
const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);
if (new_operand->shape().dimensions(new_space_dim) %
window.dimensions(old_space_dim).stride() !=
0) {
return false;
}
if (!ShapeUtil::ElementIsFloating(consumer->shape())) {
return false;
}
auto scatter_comp = consumer->scatter();
if (!Match(scatter_comp->root_instruction(),
m::AddAnyOrder(m::Parameter(0), m::Parameter(1)))) {
return false;
}
auto select_comp = consumer->select();
if (!Match(select_comp->root_instruction(),
m::Compare(m::Parameter(0), m::Parameter(1))
.WithComparisonDirection(ComparisonDirection::kGe)) &&
!Match(select_comp->root_instruction(),
m::Compare(m::Parameter(1), m::Parameter(0))
.WithComparisonDirection(ComparisonDirection::kGe))) {
return false;
}
if (consumer->window().dimensions(old_space_dim).padding_low() != 0) {
return false;
}
}
return true;
}
return false;
}
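// Performs the actual propagation: clones `consumer`, rewires it onto the
// space-to-batched operands, and records the new instruction together with
// its dimension maps. The returned bool indicates whether further propagation
// is needed.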
absl::StatusOr<bool> ConvolutionVisitor::Propagate(HloInstruction* consumer,
HloInstruction* producer) {
auto computation = consumer->parent();
if (IsTrivialElementwise(consumer)) {
auto dim_map_val = instr_to_dim_map_[producer];
auto new_consumer = computation->AddInstruction(consumer->Clone());
bool is_pivot_producer_modified = false;
if (consumer->IsElementwiseBinary() ||
consumer->opcode() == HloOpcode::kSelect) {
int64_t pivot_operand_number = -1;
HloInstruction* pivot_operand = nullptr;
for (int i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
continue;
}
auto operand = consumer->mutable_operand(i);
if (old_to_new_instrs_.contains(operand)) {
if (pivot_operand_number == -1 ||
old_to_new_instrs_[pivot_operand]->shape().dimensions() <
old_to_new_instrs_[operand]->shape().dimensions()) {
is_pivot_producer_modified = true;
pivot_operand_number = i;
pivot_operand = consumer->mutable_operand(pivot_operand_number);
}
}
}
if (pivot_operand_number != -1) {
producer = pivot_operand;
}
}
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
std::vector<HloInstruction*> instructions_to_transform;
if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {
auto broadcast = consumer->mutable_operand(i);
PropagateOnBroadcast(broadcast, producer);
HloInstruction* new_broadcast = nullptr;
auto new_producer = old_to_new_instrs_[producer];
for (auto previous_broadcast : broadcast_map_[broadcast]) {
if (ShapeUtil::CompatibleIgnoringElementType(
previous_broadcast->shape(), new_producer->shape())) {
new_broadcast = previous_broadcast;
break;
}
}
CHECK_NE(new_broadcast, nullptr);
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, new_broadcast));
} else if (old_to_new_instrs_.contains(consumer->mutable_operand(i))) {
HloInstruction* operand_to_use = nullptr;
auto result = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_batch_size =
producer->shape().dimensions(old_batch_dim);
HloInstruction* new_instr =
old_to_new_instrs_[consumer->mutable_operand(i)];
HloInstruction* pivot_new_instr = old_to_new_instrs_[producer];
auto permute_dims = instr_to_dim_permute_map_[new_instr];
const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);
const int64_t batch_size = new_instr->shape().dimensions(batch_dim);
if (new_instr->shape().dimensions(space_dim) !=
pivot_new_instr->shape().dimensions(space_dim)) {
CHECK_EQ(batch_dim + 1, space_dim);
std::vector<int64_t> new_dimensions(
new_instr->shape().dimensions().begin(),
new_instr->shape().dimensions().end());
new_dimensions[space_dim] *= (batch_size / old_batch_size);
new_dimensions[batch_dim] = old_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
MakeReshapeHlo(new_dimensions, new_instr));
const int64_t pivot_space_size =
pivot_new_instr->shape().dimensions(space_dim) * batch_size /
old_batch_size;
CHECK(pivot_space_size > new_dimensions[space_dim] ||
!is_pivot_producer_modified);
PaddingConfig padding_config =
MakeNoPaddingConfig(reshape->shape().dimensions_size());
padding_config.mutable_dimensions(space_dim)->set_edge_padding_high(
pivot_space_size - new_dimensions[space_dim]);
padding_config.mutable_dimensions(space_dim)->set_edge_padding_low(0);
HloInstruction* padding =
consumer->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(reshape->shape().element_type())));
TF_ASSIGN_OR_RETURN(
HloInstruction * padded_operand,
MakePadHlo(reshape, padding, padding_config, &reshape->metadata(),
&reshape->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
operand_to_use,
MakeReshapeHlo(pivot_new_instr->shape().dimensions(),
padded_operand));
} else {
operand_to_use = old_to_new_instrs_[consumer->mutable_operand(i)];
}
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, operand_to_use));
} else if (consumer->IsElementwiseBinary() &&
consumer->mutable_operand(i)->opcode() ==
HloOpcode::kBroadcast &&
IsBroadcastTree(consumer->mutable_operand(i), producer,
instructions_to_transform)) {
RewriteBroadcastTree(producer, instructions_to_transform);
TF_CHECK_OK(new_consumer->ReplaceOperandWithDifferentShape(
i, old_to_new_instrs_[consumer->mutable_operand(i)]));
} else if (consumer->operand(i)->opcode() == HloOpcode::kConstant) {
TF_ASSIGN_OR_RETURN(
auto new_constant,
PropagateOnConstant(consumer->mutable_operand(i), producer));
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(i, new_constant));
}
}
auto old_type = new_consumer->mutable_shape()->element_type();
*(new_consumer->mutable_shape()) = old_to_new_instrs_[producer]->shape();
new_consumer->mutable_shape()->set_element_type(old_type);
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
CHECK(instr_to_dim_permute_map_.contains(old_to_new_instrs_[producer]));
instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>(
instr_to_dim_permute_map_[old_to_new_instrs_[producer]]);
VLOG(2) << " new_consumer " << new_consumer->ToString()
<< " old_to_new_instrs_[producer] "
<< old_to_new_instrs_[producer]->ToString() << " permute dims "
<< instr_to_dim_permute_map_.count(new_consumer);
return true;
}
if (consumer->opcode() == HloOpcode::kConvolution) {
if (IsConvSuitableForSpaceToBatch(consumer)) {
TF_CHECK_OK(PropagateOnConv(consumer));
return true;
} else {
TF_CHECK_OK(PropagateOnBackpropFilterConv(consumer));
return false;
}
}
if (consumer->opcode() == HloOpcode::kConcatenate) {
TF_CHECK_OK(PropagateOnConcat(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kReverse) {
TF_CHECK_OK(PropagateOnReverse(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kDot) {
auto dim_map_val = instr_to_dim_map_[producer];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
int64_t new_batch_dim = -1;
int64_t new_space_dim = -1;
int64_t outer = 0;
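// Dot output dims are laid out as [dot-batch dims, lhs non-contracting dims,
// rhs non-contracting dims]; locate where the old batch and space dims land
// among the lhs non-contracting dims.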
for (int64_t i = 0; i < producer->shape().rank(); ++i) {
if (absl::c_linear_search(
consumer->dot_dimension_numbers().lhs_batch_dimensions(), i) ||
absl::c_linear_search(
consumer->dot_dimension_numbers().lhs_contracting_dimensions(),
i)) {
continue;
}
if (i == old_batch_dim) {
new_batch_dim =
outer +
consumer->dot_dimension_numbers().lhs_batch_dimensions_size();
}
if (i == old_space_dim) {
new_space_dim =
    outer +
    consumer->dot_dimension_numbers().lhs_batch_dimensions_size();
}
++outer;
}
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
consumer->shape().rank() - 1;
instr_to_dim_map_[consumer] = dim_map;
auto new_consumer = computation->AddInstruction(consumer->Clone());
new_consumer->mutable_shape()->mutable_dimensions()[new_batch_dim] =
producer->shape().dimensions(old_batch_dim);
new_consumer->mutable_shape()->mutable_dimensions()[new_space_dim] =
producer->shape().dimensions(old_space_dim);
old_to_new_instrs_[consumer] = new_consumer;
return true;
}
if (consumer->opcode() == HloOpcode::kPad) {
TF_CHECK_OK(PropagateOnPad(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kSlice) {
TF_CHECK_OK(PropagateOnSlice(consumer));
return true;
}
if (consumer->opcode() == HloOpcode::kReduce) {
auto reduce_dims = consumer->dimensions();
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = DimLookUp(permute_dims, space_dim);
std::vector<int64_t> changed_dims(consumer->dimensions().size());
if (!absl::c_linear_search(reduce_dims, old_batch_dim) &&
!absl::c_linear_search(reduce_dims, space_dim)) {
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
changed_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));
}
int64_t new_output_batch_dim = new_batch_dim;
int64_t new_output_space_dim = new_space_dim;
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
if (changed_dims[i] < new_batch_dim) {
new_output_batch_dim--;
}
if (changed_dims[i] < new_space_dim) {
new_output_space_dim--;
}
}
int64_t old_output_batch_dim = old_batch_dim;
int64_t old_output_space_dim = space_dim;
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
if (reduce_dims[i] < old_batch_dim) {
old_output_batch_dim--;
}
if (reduce_dims[i] < space_dim) {
old_output_space_dim--;
}
}
HloInstruction* new_consumer = nullptr;
TF_ASSIGN_OR_RETURN(
new_consumer,
MakeReduceHlo(first_operand, consumer->mutable_operand(1),
changed_dims, consumer->called_computations()[0]));
VLOG(3) << " new_output_batch_dim " << new_output_batch_dim << " size "
<< first_operand->shape().dimensions(new_batch_dim)
<< " new_output_space_dim " << new_output_space_dim << " size "
<< first_operand->shape().dimensions(new_space_dim);
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = old_output_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = old_output_space_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = -1;
instr_to_dim_map_[consumer] = dim_map;
const int64_t rank = first_operand->shape().rank();
const int64_t output_rank = new_consumer->shape().rank();
std::vector<int64_t> old_reduce_output_to_input(output_rank);
int dim_number_to_assign_old = 0;
for (int64_t i = 0; i < rank; ++i) {
if (auto it = absl::c_find(reduce_dims, i); it != reduce_dims.end()) {
continue;
}
old_reduce_output_to_input[dim_number_to_assign_old++] = i;
}
std::vector<int64_t> new_reduce_output_to_input(output_rank);
int dim_number_to_assign_new = 0;
for (int64_t i = 0; i < rank; ++i) {
if (auto it = absl::c_find(changed_dims, i); it != changed_dims.end()) {
continue;
}
new_reduce_output_to_input[dim_number_to_assign_new++] = i;
}
std::vector<int64_t> new_permute_dims(output_rank);
for (int64_t i = 0; i < output_rank; ++i) {
new_permute_dims[i] = std::distance(
new_reduce_output_to_input.begin(),
absl::c_find(
new_reduce_output_to_input,
DimLookUp(permute_dims, old_reduce_output_to_input[i])));
}
instr_to_dim_permute_map_[new_consumer] = new_permute_dims;
old_to_new_instrs_[consumer] = new_consumer;
return true;
}
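    // The reduce consumes the batch and/or space dimension. Mask the padded
    // region with the reduce's init value so it cannot perturb the result,
    // then remap the reduce dimensions into the permuted space. Returning
    // false signals that this result is final and can replace the original
    // reduce directly.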
HloInstruction* new_consumer =
computation->AddInstruction(consumer->Clone());
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
TF_ASSIGN_OR_RETURN(
first_operand,
SelectValidPortion(first_operand, consumer->mutable_operand(0),
consumer->mutable_operand(1), new_batch_dim,
new_spatial_dims, old_batch_dim, old_spatial_dims));
for (int64_t i = 0; i < new_consumer->dimensions().size(); ++i) {
changed_dims[i] = DimLookUp(permute_dims, new_consumer->dimensions(i));
}
*(new_consumer->mutable_dimensions()) = changed_dims;
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
return false;
}
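  // Transposes move no data here: pass the (already permuted) operand forward
  // and fold the transpose into the tracked dimension maps instead.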
if (consumer->opcode() == HloOpcode::kTranspose) {
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto new_consumer = computation->AddInstruction(first_operand->Clone());
old_to_new_instrs_[consumer] = new_consumer;
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_feature_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kFeature)];
int64_t new_batch_dim, new_space_dim, new_feature_dim;
std::vector<int64_t> new_dimensions(consumer->dimensions().size());
for (int64_t ctr = 0; ctr < consumer->dimensions().size(); ++ctr) {
int64_t dim = consumer->dimensions(ctr);
if (dim == old_batch_dim) {
new_batch_dim = ctr;
}
if (dim == old_space_dim) {
new_space_dim = ctr;
}
if (dim == old_feature_dim) {
new_feature_dim = ctr;
}
}
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = new_feature_dim;
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim;
instr_to_dim_map_[consumer] = dim_map;
std::vector<int64_t> new_permute_dims(consumer->dimensions().size());
auto permute_dims = instr_to_dim_permute_map_[first_operand];
for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {
new_permute_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));
}
instr_to_dim_permute_map_[new_consumer] = new_permute_dims;
return true;
}
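  // Reduce-window and select-and-scatter share most of their handling: mask
  // the padded region, align the per-split space size to the window stride,
  // materialize a halo so windows may cross split boundaries, and rebuild the
  // window in the permuted dimension order.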
if (consumer->opcode() == HloOpcode::kReduceWindow ||
consumer->opcode() == HloOpcode::kSelectAndScatter) {
bool is_select_and_scatter =
consumer->opcode() == HloOpcode::kSelectAndScatter;
auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
auto init_val = is_select_and_scatter ? consumer->mutable_operand(2)
: consumer->mutable_operand(1);
auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];
auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
const int64_t old_batch_dim =
dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = old_spatial_dims[0];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t new_space_dim = new_spatial_dims[0];
auto new_shape = first_operand->shape();
auto old_shape = consumer->mutable_operand(0)->shape();
const int64_t new_space_size = new_shape.dimensions(new_space_dim);
const int64_t stride =
consumer->window().dimensions(old_space_dim).stride();
auto pad_val =
is_select_and_scatter
? consumer->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MinValue(
consumer->operand(2)->shape().element_type())))
: init_val;
TF_ASSIGN_OR_RETURN(
first_operand,
SelectValidPortion(first_operand, consumer->mutable_operand(0), pad_val,
new_batch_dim, new_spatial_dims, old_batch_dim,
old_spatial_dims));
const int64_t extra_space = new_space_size % stride;
if (extra_space) {
CHECK_EQ(consumer->opcode(), HloOpcode::kReduceWindow);
const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);
const int64_t old_space_size = old_shape.dimensions(old_space_dim);
if ((new_space_size - extra_space) * old_batch_size *
ctrl_.number_of_splits >=
old_batch_size * old_space_size) {
TF_ASSIGN_OR_RETURN(
first_operand, ChangeSpatialSizeOnSpaceToBatchedShape(
first_operand, new_batch_dim, old_batch_size,
new_spatial_dims, new_space_size - extra_space));
} else {
TF_ASSIGN_OR_RETURN(
first_operand,
ChangeSpatialSizeOnSpaceToBatchedShape(
first_operand, new_batch_dim, old_batch_size, new_spatial_dims,
new_space_size + stride - extra_space,
true));
}
}
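    // Windows anchored near the end of one split read data that lives at the
    // start of the next; append that data as a halo on the space dimension.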
const int64_t window_size =
consumer->window().dimensions(old_space_dim).size();
const int64_t last_overlap_point = ((new_space_size - 1) / stride) * stride;
VLOG(1) << "last_overlap_point " << last_overlap_point << " window_size "
<< window_size << " new_space_size " << new_space_size;
const int64_t halo_size = last_overlap_point + window_size - new_space_size;
if (halo_size > 0) {
TF_ASSIGN_OR_RETURN(
first_operand,
HaloDuplicateWithSlice(first_operand, new_spatial_dims, new_batch_dim,
0, halo_size, init_val));
}
Window new_win;
for (int64_t i = 0; i < consumer->window().dimensions().size(); ++i) {
auto dim = ReverseDimLookUp(permute_dims, i);
new_win.add_dimensions();
new_win.mutable_dimensions(i)->set_stride(
consumer->window().dimensions(dim).stride());
new_win.mutable_dimensions(i)->set_size(
consumer->window().dimensions(dim).size());
if (i == old_space_dim) {
new_win.mutable_dimensions(i)->set_padding_high(0);
new_win.mutable_dimensions(i)->set_padding_low(0);
} else {
new_win.mutable_dimensions(i)->set_padding_high(
consumer->window().dimensions(dim).padding_high());
new_win.mutable_dimensions(i)->set_padding_low(
consumer->window().dimensions(dim).padding_low());
}
new_win.mutable_dimensions(i)->set_window_dilation(
consumer->window().dimensions(dim).window_dilation());
new_win.mutable_dimensions(i)->set_base_dilation(
consumer->window().dimensions(dim).base_dilation());
new_win.mutable_dimensions(i)->set_window_reversal(
consumer->window().dimensions(dim).window_reversal());
}
new_shape = first_operand->shape();
HloInstruction* new_consumer = nullptr;
if (is_select_and_scatter) {
auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];
auto select_comp = consumer->select();
auto scatter_comp = consumer->scatter();
TF_ASSIGN_OR_RETURN(
auto new_select_and_scatter_shape,
ShapeInference::InferSelectAndScatterShape(
new_shape, select_comp->ComputeProgramShape(), new_win,
second_operand->shape(), init_val->shape(),
scatter_comp->ComputeProgramShape()));
new_consumer = computation_->AddInstruction(
HloInstruction::CreateSelectAndScatter(
new_select_and_scatter_shape, first_operand, select_comp, new_win,
second_operand, init_val, scatter_comp),
&consumer->metadata(), &consumer->frontend_attributes());
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(1, second_operand));
VLOG(2) << "New select and scatter " << new_consumer->ToString();
if (halo_size > 0) {
const int64_t rank = new_consumer->shape().rank();
const int64_t batch_size =
new_consumer->shape().dimensions(new_batch_dim);
std::vector<int64_t> start_indices(rank, 0),
end_indices(new_consumer->shape().dimensions().begin(),
new_consumer->shape().dimensions().end()),
strides(rank, 1);
start_indices[new_space_dim] = new_space_size;
end_indices[new_space_dim] = new_space_size + halo_size;
end_indices[new_batch_dim] = batch_size - 1;
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom,
MakeSliceHlo(new_consumer, start_indices, end_indices, strides,
&consumer->metadata(),
&consumer->frontend_attributes()));
std::vector<int64_t> start_indices_top(rank, 0),
end_indices_top(new_consumer->shape().dimensions().begin(),
new_consumer->shape().dimensions().end());
end_indices_top[new_space_dim] = halo_size;
start_indices_top[new_batch_dim] = 1;
TF_ASSIGN_OR_RETURN(
HloInstruction * top,
MakeSliceHlo(new_consumer, start_indices_top, end_indices_top,
strides, &consumer->metadata(),
&consumer->frontend_attributes()));
HloInstruction* default_fill = MakeBroadcastHlo(
init_val, {}, top->shape().dimensions(), &init_val->metadata(),
&init_val->frontend_attributes());
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom_compare,
MakeCompareHlo(ComparisonDirection::kNe, bottom, default_fill,
&bottom->metadata(),
&bottom->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * bottom_taken,
MakeSelectHlo(bottom_compare, bottom, default_fill, nullptr,
&bottom_compare->metadata(),
&bottom_compare->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * top_compare,
MakeCompareHlo(ComparisonDirection::kNe, top, default_fill,
&top->metadata(), &top->frontend_attributes()));
TF_ASSIGN_OR_RETURN(HloInstruction * top_taken,
MakeSelectHlo(top_compare, top, bottom_taken,
nullptr, &top_compare->metadata(),
&top_compare->frontend_attributes()));
TF_ASSIGN_OR_RETURN(HloInstruction * both_compare,
MakeBinaryHlo(HloOpcode::kAnd, top_compare,
bottom_compare, &consumer->metadata(),
&consumer->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * both_added,
MakeBinaryHlo(HloOpcode::kAdd, top, bottom, &consumer->metadata(),
&consumer->frontend_attributes()));
TF_ASSIGN_OR_RETURN(
HloInstruction * final_selection,
MakeSelectHlo(both_compare, both_added, top_taken, nullptr,
&both_compare->metadata(),
&both_compare->frontend_attributes()));
PaddingConfig padding_config =
MakeNoPaddingConfig(final_selection->shape().dimensions_size());
padding_config.mutable_dimensions(new_batch_dim)
->set_edge_padding_low(1);
padding_config.mutable_dimensions(new_space_dim)
->set_edge_padding_high(new_space_size);
HloInstruction* padding = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(final_selection->shape().element_type())),
&consumer->metadata(), &consumer->frontend_attributes());
TF_ASSIGN_OR_RETURN(
final_selection,
MakePadHlo(final_selection, padding, padding_config,
&final_selection->metadata(),
&final_selection->frontend_attributes()));
tsl::core::Bitmap b(batch_size * (new_space_size + halo_size));
for (int k = 0; k < batch_size * (new_space_size + halo_size); ++k) {
const int64_t space_index = k % (new_space_size + halo_size);
const int64_t batch_index = (k / (new_space_size + halo_size));
if (batch_index < 1 || space_index >= halo_size) {
b.set(k);
} else {
b.clear(k);
}
}
auto arg_literal = LiteralUtil::CreateR1(b);
VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString();
HloInstruction* slice_mask = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)),
&consumer->metadata(), &consumer->frontend_attributes());
std::vector<int64_t> slice_mask_reshape_dims(2);
slice_mask_reshape_dims[0] = batch_size;
slice_mask_reshape_dims[1] = (new_space_size + halo_size);
TF_ASSIGN_OR_RETURN(
HloInstruction * slice_mask_reshaped,
MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));
HloInstruction* shape_mask = MakeBroadcastHlo(
slice_mask_reshaped, {new_batch_dim, new_space_dim},
final_selection->shape().dimensions(), &slice_mask->metadata(),
&slice_mask->frontend_attributes());
TF_ASSIGN_OR_RETURN(
new_consumer,
MakeSelectHlo(shape_mask, new_consumer, final_selection, nullptr,
&shape_mask->metadata(),
&shape_mask->frontend_attributes()));
}
auto previous_shape =
old_to_new_instrs_[consumer->mutable_operand(0)]->shape();
std::vector<int64_t> start_indices(previous_shape.rank(), 0),
end_indices(previous_shape.dimensions().begin(),
previous_shape.dimensions().end()),
strides(previous_shape.rank(), 1);
TF_ASSIGN_OR_RETURN(new_consumer,
MakeSliceHlo(new_consumer, start_indices, end_indices,
strides, &consumer->metadata(),
&consumer->frontend_attributes()));
} else {
auto reduce_comp = consumer->to_apply();
TF_ASSIGN_OR_RETURN(auto new_reduce_window_shape,
ShapeInference::InferReduceWindowShape(
new_shape, init_val->shape(), new_win));
new_consumer = computation_->AddInstruction(
HloInstruction::CreateReduceWindow(new_reduce_window_shape,
first_operand, init_val, new_win,
reduce_comp),
&consumer->metadata(), &consumer->frontend_attributes());
TF_CHECK_OK(
new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
VLOG(1) << "New reduce window " << new_consumer->ToString();
}
old_to_new_instrs_[consumer] = new_consumer;
instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>(
instr_to_dim_permute_map_[old_to_new_instrs_[consumer->mutable_operand(
0)]]);
return true;
}
LOG(FATAL) << "Trying to propagate through an unsupported instruction "
<< consumer->ToString();
return true;
}
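// Masks a space-to-batched tensor: positions that fall beyond the original
// spatial extent (i.e. padding introduced by the split) are replaced with
// select_val, while valid positions keep new_instr's data.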
absl::StatusOr<HloInstruction*> ConvolutionVisitor::SelectValidPortion(
HloInstruction* new_instr, HloInstruction* old_instr,
HloInstruction* select_val, int64_t new_batch_dim,
absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
absl::Span<const int64_t> old_space_dims) {
auto new_shape = new_instr->shape();
auto old_shape = old_instr->shape();
VLOG(1) << "In SelectValidPortion new_batch_dim " << new_batch_dim
<< " new_space_dim " << new_space_dims[0] << " old_batch_dim "
<< old_batch_dim << " old_space_dim " << old_space_dims[0];
const int64_t new_batch_size = new_shape.dimensions(new_batch_dim);
const int64_t new_space_size = new_shape.dimensions(new_space_dims[0]);
const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);
const int64_t old_space_size = old_shape.dimensions(old_space_dims[0]);
CHECK_EQ(new_batch_size % old_batch_size, 0)
<< " New batch size " << new_batch_size << " old batch size "
<< old_batch_size;
const int64_t num_splits = ctrl_.number_of_splits;
const int64_t spatial_dim_count = new_space_dims.size();
std::vector<int64_t> bounds(2 + spatial_dim_count, new_space_size);
bounds[0] = old_batch_size;
bounds[1] = IPow<int64_t>(num_splits, spatial_dim_count);
const int64_t total_new_space =
IPow<int64_t>(new_space_size, spatial_dim_count);
tsl::core::Bitmap b(new_batch_size * total_new_space);
for (int k = 0; k < new_batch_size * total_new_space; ++k) {
auto radix = ToMixedRadix(k, bounds);
bool out_of_bounds = false;
int64_t batch_residue = 1;
for (int i = 0; i < spatial_dim_count; ++i) {
const int64_t space_index = radix[2 + i];
const int64_t batch_index = (radix[1] / batch_residue) % num_splits;
batch_residue *= num_splits;
if (batch_index * new_space_size + space_index >= old_space_size) {
out_of_bounds = true;
}
}
if (!out_of_bounds) {
b.set(k);
} else {
b.clear(k);
}
}
auto arg_literal = LiteralUtil::CreateR1(b);
VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString();
HloInstruction* slice_mask = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)),
&old_instr->metadata(), &old_instr->frontend_attributes());
std::vector<int64_t> slice_mask_reshape_dims(1 + spatial_dim_count,
new_space_size);
slice_mask_reshape_dims[0] = new_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * slice_mask_reshaped,
MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));
std::vector<int64_t> broadcast_dims(new_space_dims.begin(),
new_space_dims.end());
broadcast_dims.insert(broadcast_dims.begin(), new_batch_dim);
HloInstruction* shape_mask = MakeBroadcastHlo(
slice_mask_reshaped, broadcast_dims, new_instr->shape().dimensions(),
&slice_mask_reshaped->metadata(),
&slice_mask_reshaped->frontend_attributes());
VLOG(1) << "Shape mask made " << shape_mask->ToString();
HloInstruction* zeroes = MakeBroadcastHlo(
select_val, {}, new_instr->shape().dimensions(), &select_val->metadata(),
&select_val->frontend_attributes());
TF_ASSIGN_OR_RETURN(new_instr,
MakeSelectHlo(shape_mask, new_instr, zeroes, nullptr,
&shape_mask->metadata(),
&shape_mask->frontend_attributes()));
return new_instr;
}
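// Converts a space-to-batched tensor back to its original layout: split the
// merged batch, fold the splits back into the spatial dimension, slice off
// the padding, and undo the dimension permutation. Results are memoized in
// batch_to_space_map_.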
absl::StatusOr<HloInstruction*> ConvolutionVisitor::BatchToSpace(
HloInstruction* old_instr) {
if (batch_to_space_map_.count(old_instr)) {
CHECK_NE(batch_to_space_map_[old_instr], nullptr);
return batch_to_space_map_[old_instr];
}
auto result = instr_to_dim_map_[old_instr];
const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
const int64_t old_batch_size = old_instr->shape().dimensions(old_batch_dim);
CHECK(old_to_new_instrs_.contains(old_instr));
auto new_instr = old_to_new_instrs_[old_instr];
VLOG(2) << "old_batch_dim " << old_batch_dim << " old_space_dim "
<< old_space_dim << " old_instr " << old_instr->ToString()
<< "\n new_instr " << new_instr->ToString() << " permute dims "
<< instr_to_dim_permute_map_.count(new_instr) << " old_batch_size "
<< old_batch_size;
CHECK(instr_to_dim_permute_map_.contains(new_instr));
auto permute_dims = instr_to_dim_permute_map_[new_instr];
const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);
const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);
const int64_t spatial_dim_size = new_instr->shape().dimensions(space_dim);
std::vector<int64_t> split_spatial_dimensions(
ctrl_.count_of_dimensions_to_convert);
absl::c_iota(split_spatial_dimensions, space_dim);
TF_ASSIGN_OR_RETURN(new_instr, SplitAndTransposeMergedBatch(
new_instr, batch_dim, old_batch_size,
split_spatial_dimensions));
std::vector<int64_t> new_dimensions(new_instr->shape().dimensions().begin(),
new_instr->shape().dimensions().end());
new_dimensions.erase(new_dimensions.begin() + split_spatial_dimensions[0],
new_dimensions.begin() + split_spatial_dimensions[0] +
ctrl_.count_of_dimensions_to_convert);
for (auto spatial_dimension : split_spatial_dimensions) {
new_dimensions[spatial_dimension] =
spatial_dim_size * ctrl_.number_of_splits;
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
MakeReshapeHlo(new_dimensions, new_instr));
VLOG(1) << "Batch to space reshape " << reshape->ToString();
const int64_t rank = old_instr->shape().rank();
std::vector<int64_t> start_indices(rank, 0),
end_indices(new_dimensions.begin(), new_dimensions.end()),
strides(rank, 1);
for (auto spatial_dimension : split_spatial_dimensions) {
end_indices[spatial_dimension] =
old_instr->shape().dimensions(old_space_dim);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * output_slice,
MakeSliceHlo(reshape, start_indices, end_indices, strides,
&reshape->metadata(), &reshape->frontend_attributes()));
VLOG(1) << "Batch to space slice " << output_slice->ToString();
std::vector<int64_t> transpose_dims(permute_dims);
TF_ASSIGN_OR_RETURN(HloInstruction * output_transpose,
MakeTransposeHlo(output_slice, transpose_dims));
old_instr->SetupDerivedInstruction(output_transpose);
batch_to_space_map_[old_instr] = output_transpose;
return output_transpose;
}
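// Breadth-first walk from the space-to-batched convolution through its users,
// propagating the transformed layout where supported and inserting
// batch-to-space conversions in front of unsupported users or at the root.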
absl::Status ConvolutionVisitor::PropagateOnUsers(HloInstruction* old_conv) {
std::queue<std::pair<HloInstruction*, HloInstruction*>> propagation_worklist;
if (old_conv->user_count() == 0) {
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
BatchToSpace(old_conv));
VLOG(1) << "Replacing the root instruction to "
<< batch_to_space->ToString();
TF_CHECK_OK(computation_->ReplaceInstruction(old_conv, batch_to_space));
VLOG(1) << "Replacement successful";
return absl::OkStatus();
}
int64_t iteration_count = 0;
propagation_worklist.push(
std::make_pair(old_conv, old_conv->mutable_operand(0)));
while (!propagation_worklist.empty()) {
auto top = propagation_worklist.front();
auto node = top.first;
auto parent = top.second;
VLOG(1) << "Traversing for propagation operating on " << node->ToString();
propagation_worklist.pop();
if (old_to_new_instrs_.count(node) > 0 && iteration_count != 0) {
continue;
}
bool needs_further_propagation = true;
if (iteration_count != 0) {
TF_ASSIGN_OR_RETURN(needs_further_propagation, Propagate(node, parent));
}
iteration_count++;
if (node->parent()->root_instruction() == node) {
if (!needs_further_propagation) {
VLOG(1) << "Replacing the root instruction to "
<< old_to_new_instrs_[node]->ToString();
TF_CHECK_OK(
computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
continue;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space, BatchToSpace(node));
VLOG(1) << "Replacing the root instruction to "
<< batch_to_space->ToString();
TF_CHECK_OK(computation_->ReplaceInstruction(node, batch_to_space));
} else {
if (!needs_further_propagation) {
TF_CHECK_OK(
computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
continue;
}
HloInstructionSet unsupported_users;
for (auto user : node->users()) {
if (!SupportedOpForPropagation(user, node)) {
VLOG(1) << "Unsupported op found " << user->ToString();
unsupported_users.insert(user);
continue;
}
if (CanPropagate(user, node)) {
non_propagatable_instrs_.erase(user);
propagation_worklist.push(std::make_pair(user, node));
} else {
non_propagatable_instrs_.insert(user);
}
}
if (!unsupported_users.empty()) {
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
BatchToSpace(node));
for (auto user : unsupported_users) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) == node) {
TF_CHECK_OK(user->ReplaceOperandWith(i, batch_to_space));
}
}
}
}
}
}
return absl::OkStatus();
}
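// Propagates space-to-batch through a convolution whose activations are
// already space-to-batched: mask the padded region, re-split the space
// dimension to match the convolution's output requirements, add the halo, and
// emit a new convolution with rewritten dimension numbers.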
absl::Status ConvolutionVisitor::PropagateOnConv(HloInstruction* convolution) {
auto activations_old = convolution->mutable_operand(0);
CHECK(old_to_new_instrs_.contains(activations_old));
auto activations_new = old_to_new_instrs_[activations_old];
auto permute_dims = instr_to_dim_permute_map_[activations_new];
auto original_conv_dims = convolution->convolution_dimension_numbers();
auto old_new_dims = GetSpatialDimsToSplit(activations_old);
std::vector<int64_t> old_spatial_dims = old_new_dims.first;
std::vector<int64_t> new_spatial_dims = old_new_dims.second;
auto permuted_conv_dims_numbers = original_conv_dims;
int64_t activations_batch_dim =
DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());
int64_t activations_feature_dim =
DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
permuted_conv_dims_numbers.set_input_batch_dimension(activations_batch_dim);
permuted_conv_dims_numbers.set_input_feature_dimension(
activations_feature_dim);
for (int64_t i = 0; i < original_conv_dims.input_spatial_dimensions_size();
++i) {
permuted_conv_dims_numbers.set_input_spatial_dimensions(
i, DimLookUp(permute_dims,
original_conv_dims.input_spatial_dimensions(i)));
}
const int64_t old_batch_dim = original_conv_dims.input_batch_dimension();
const int64_t old_batch_size =
activations_old->shape().dimensions(old_batch_dim);
ConvDetails c =
GetConvolutionDetails(convolution, permuted_conv_dims_numbers);
VLOG(1) << "Propagating on conv activations_batch_dim "
<< activations_batch_dim << " spatial_dimension_to_split "
<< c.spatial_dimensions_to_split[0] << " old_batch_size "
<< old_batch_size;
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
activations_batch_dim, &new_spatial_dims));
activations_new = retval.instr;
std::vector<int64_t> trans_dims = retval.transpose_dims;
CHECK(!trans_dims.empty());
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations_new->shape().element_type())),
&convolution->metadata(), &convolution->frontend_attributes());
TF_ASSIGN_OR_RETURN(
activations_new,
SelectValidPortion(activations_new, activations_old, select_val,
activations_batch_dim, new_spatial_dims, old_batch_dim,
old_spatial_dims));
auto new_dim_numbers = permuted_conv_dims_numbers;
const int64_t num_splits = ctrl_.number_of_splits;
const int64_t output_offsets = convolution->shape().dimensions(
permuted_conv_dims_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution)));
const int64_t output_offsets_per_split =
CeilOfRatio(output_offsets, num_splits);
int64_t spatial_split_size =
CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;
VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size
<< " spatial_split_size " << spatial_split_size;
while (spatial_split_size * num_splits + c.halo_size - c.spatial_size < 0 ||
spatial_split_size < c.halo_size - c.inherent_low_padding) {
spatial_split_size += c.stride;
}
VLOG(1) << "Modified spatial_split_size " << spatial_split_size;
const int64_t new_space_size =
activations_new->shape().dimensions(new_spatial_dims[0]);
int64_t slice_size = spatial_split_size + c.halo_size;
if (spatial_split_size > new_space_size) {
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
new_spatial_dims, spatial_split_size,
true));
} else {
if (spatial_split_size < new_space_size) {
VLOG(3)
<< "Decreasing the spatial size while propagating spatial_split_size "
<< spatial_split_size << " new_space_size " << new_space_size;
if (new_space_size % c.stride != 0 || c.base_dilation_factor != 1) {
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
new_spatial_dims, spatial_split_size));
} else {
const int64_t additional_space_present = spatial_split_size % c.stride;
spatial_split_size = new_space_size;
slice_size =
spatial_split_size + std::max(c.kernel_spatial_dim_size - c.stride -
additional_space_present,
static_cast<int64_t>(0));
}
}
}
TF_ASSIGN_OR_RETURN(
activations_new,
HaloDuplicateWithSlice(
activations_new, new_spatial_dims, activations_batch_dim,
c.base_dilation_factor != 1 &&
c.inherent_low_padding != 0
? (c.inherent_low_padding == c.base_dilation_factor ? 1 : 0)
: c.inherent_low_padding,
slice_size - spatial_split_size));
const int64_t rank = (convolution->shape().rank());
std::vector<int64_t> transpose_dims(rank);
int dim_count = 0;
std::map<int64_t, int64_t> dim_translator;
for (int j = 0;
j < permuted_conv_dims_numbers.output_spatial_dimensions_size(); ++j) {
if (j == GetFirstChosenSpatialDim(convolution)) {
dim_translator[permuted_conv_dims_numbers.output_batch_dimension()] =
dim_count;
new_dim_numbers.set_output_batch_dimension(dim_count++);
}
dim_translator[permuted_conv_dims_numbers.output_spatial_dimensions(j)] =
dim_count;
new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
dim_count++;
}
dim_translator[permuted_conv_dims_numbers.output_feature_dimension()] =
dim_count;
new_dim_numbers.set_output_feature_dimension(dim_count);
int p = 0;
for (const auto& entry : dim_translator) {
transpose_dims[p] = entry.second;
p++;
}
auto new_window = convolution->window();
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
new_window.mutable_dimensions(first_dim + i)
->set_padding_high(c.high_padding_for_conv);
new_window.mutable_dimensions(first_dim + i)
->set_padding_low(c.low_padding_for_conv);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations_new, convolution->mutable_operand(1),
convolution->feature_group_count(), convolution->batch_group_count(),
new_window, new_dim_numbers, convolution->precision_config(),
convolution->shape().element_type()));
convolution->SetupDerivedInstruction(new_conv);
old_to_new_instrs_[convolution] = new_conv;
VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
original_conv_dims.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
original_conv_dims.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
original_conv_dims.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[convolution] = dim_map;
instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
convs_to_visit_.erase(convolution);
return absl::OkStatus();
}
absl::Status ConvolutionVisitor::PropagateOnConcat(HloInstruction* concat) {
auto first_operand = old_to_new_instrs_[concat->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
const int64_t new_concat_dim =
DimLookUp(permute_dims, concat->concatenate_dimension());
std::vector<HloInstruction*> new_operands(concat->operand_count());
for (int64_t i = 0; i < concat->operand_count(); ++i) {
new_operands[i] = old_to_new_instrs_[concat->mutable_operand(i)];
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_concat,
MakeConcatHlo(new_operands, new_concat_dim, &concat->metadata(),
&concat->frontend_attributes()));
old_to_new_instrs_[concat] = new_concat;
instr_to_dim_map_[concat] =
std::vector<int64_t>(instr_to_dim_map_[concat->mutable_operand(0)]);
instr_to_dim_permute_map_[new_concat] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
absl::Status ConvolutionVisitor::PropagateOnReverse(HloInstruction* reverse) {
auto first_operand = old_to_new_instrs_[reverse->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
std::vector<int64_t> new_reverse_dimensions(reverse->dimensions().size());
int dim_count = 0;
for (auto dim : reverse->dimensions()) {
new_reverse_dimensions[dim_count++] = DimLookUp(permute_dims, dim);
}
TF_ASSIGN_OR_RETURN(HloInstruction * new_reverse,
MakeReverseHlo(first_operand, new_reverse_dimensions));
old_to_new_instrs_[reverse] = new_reverse;
instr_to_dim_map_[reverse] =
std::vector<int64_t>(instr_to_dim_map_[reverse->mutable_operand(0)]);
instr_to_dim_permute_map_[new_reverse] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
absl::Status ConvolutionVisitor::PropagateOnPad(HloInstruction* pad) {
auto first_operand = old_to_new_instrs_[pad->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[first_operand];
PaddingConfig padding_config;
for (int i = 0; i < pad->shape().rank(); ++i) {
auto dimension = padding_config.add_dimensions();
const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
auto old_padding = pad->padding_config().dimensions(old_dim);
dimension->set_edge_padding_low(old_padding.edge_padding_low());
dimension->set_edge_padding_high(old_padding.edge_padding_high());
dimension->set_interior_padding(old_padding.interior_padding());
}
HloInstruction* padding = pad->mutable_operand(1);
TF_ASSIGN_OR_RETURN(auto new_pad,
MakePadHlo(first_operand, padding, padding_config,
&first_operand->metadata(),
&first_operand->frontend_attributes()));
old_to_new_instrs_[pad] = new_pad;
instr_to_dim_map_[pad] =
std::vector<int64_t>(instr_to_dim_map_[pad->mutable_operand(0)]);
instr_to_dim_permute_map_[new_pad] =
std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
return absl::OkStatus();
}
absl::Status ConvolutionVisitor::PropagateOnSlice(HloInstruction* slice) {
auto operand = old_to_new_instrs_[slice->mutable_operand(0)];
auto permute_dims = instr_to_dim_permute_map_[operand];
DimensionVector starts(slice->shape().rank());
DimensionVector limits(slice->shape().rank());
DimensionVector strides(slice->shape().rank());
for (int i = 0; i < slice->shape().rank(); ++i) {
const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
if (slice->shape().dimensions(old_dim) ==
slice->operand(0)->shape().dimensions(old_dim)) {
starts[i] = 0;
strides[i] = 1;
limits[i] = operand->shape().dimensions(i);
continue;
}
starts[i] = slice->slice_starts(old_dim);
strides[i] = slice->slice_strides(old_dim);
limits[i] = slice->slice_limits(old_dim);
}
TF_ASSIGN_OR_RETURN(
auto new_slice,
MakeSliceHlo(operand, starts, limits, strides, &operand->metadata(),
&operand->frontend_attributes()));
old_to_new_instrs_[slice] = new_slice;
instr_to_dim_map_[slice] =
std::vector<int64_t>(instr_to_dim_map_[slice->mutable_operand(0)]);
instr_to_dim_permute_map_[new_slice] =
std::vector<int64_t>(instr_to_dim_permute_map_[operand]);
return absl::OkStatus();
}
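// After a spatial split, the per-dimension split counts sit interleaved with
// the split spatial dimensions; transpose them next to the batch dimension
// (when more than one dimension was split) and collapse them into a single
// merged batch dimension.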
absl::StatusOr<HloInstruction*> ConvolutionVisitor::TransposeAndMergeBatch(
HloInstruction* activations,
absl::Span<const int64_t> final_split_spatial_dim_positioning,
int64_t activations_batch_dim, int64_t old_batch_size) {
const int64_t spatial_dim_count = final_split_spatial_dim_positioning.size();
if (final_split_spatial_dim_positioning.size() > 1) {
int64_t start_batch_dim_position = activations_batch_dim + 1;
int64_t start_space_dim_position =
start_batch_dim_position + spatial_dim_count;
std::vector<int64_t> trans_dims(activations->shape().dimensions_size());
absl::c_iota(trans_dims, 0);
for (int i = 0; i < spatial_dim_count; ++i) {
trans_dims[start_batch_dim_position + i] =
start_batch_dim_position + (spatial_dim_count - 1 - i) * 2;
trans_dims[start_space_dim_position + i] =
start_batch_dim_position + i * 2 + 1;
}
TF_ASSIGN_OR_RETURN(activations, MakeTransposeHlo(activations, trans_dims));
}
std::vector<int64_t> batch_collapse_reshape_dims(
activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
const int64_t collapsed_batch_size =
old_batch_size * IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
batch_collapse_reshape_dims.erase(
batch_collapse_reshape_dims.begin() + activations_batch_dim,
batch_collapse_reshape_dims.begin() + activations_batch_dim +
spatial_dim_count);
batch_collapse_reshape_dims[activations_batch_dim] = collapsed_batch_size;
TF_ASSIGN_OR_RETURN(HloInstruction * batch_collapsed_reshape,
MakeReshapeHlo(batch_collapse_reshape_dims, activations));
return batch_collapsed_reshape;
}
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PerformSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t spatial_split_size,
int64_t num_splits) {
const int64_t old_batch_size =
activations->shape().dimensions(activations_batch_dim);
std::vector<int64_t> reshape_dimensions(
activations->shape().dimensions().begin(),
activations->shape().dimensions().end());
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
reshape_dimensions[spatial_dimension_to_split] = spatial_split_size;
}
int counter = 0;
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
reshape_dimensions.insert(
reshape_dimensions.begin() + (spatial_dimension_to_split + counter),
num_splits);
counter++;
}
TF_ASSIGN_OR_RETURN(HloInstruction * batch_increased_reshape,
MakeReshapeHlo(reshape_dimensions, activations));
return TransposeAndMergeBatch(
batch_increased_reshape,
spatial_dimensions_to_split,
activations_batch_dim, old_batch_size);
}
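// Applies the requested low/high padding to the spatial dimensions, then
// splits each of them into (num_splits, spatial_split_size) and merges the
// split counts into the batch dimension.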
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PadAndSplitSpace(
HloInstruction* activations,
absl::Span<const int64_t> spatial_dimensions_to_split,
int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits) {
const int64_t old_batch_size =
activations->shape().dimensions(activations_batch_dim);
if (high_padding || low_padding) {
PaddingConfig padding_config =
MakeNoPaddingConfig(activations->shape().dimensions_size());
for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
padding_config.mutable_dimensions(spatial_dimension_to_split)
->set_edge_padding_high(high_padding);
padding_config.mutable_dimensions(spatial_dimension_to_split)
->set_edge_padding_low(low_padding);
}
HloInstruction* padding = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations->shape().element_type())),
&activations->metadata(), &activations->frontend_attributes());
TF_ASSIGN_OR_RETURN(activations,
MakePadHlo(activations, padding, padding_config,
&activations->metadata(),
&activations->frontend_attributes()));
}
VLOG(1) << "Initial padded activations shape "
<< activations->shape().ToString() << " old_batch_size "
<< old_batch_size << " activations_batch_dim "
<< activations_batch_dim;
return PerformSplitSpace(activations, spatial_dimensions_to_split,
activations_batch_dim, spatial_split_size,
num_splits);
}
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>>
ConvolutionVisitor::SplitSpace(
HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
int64_t spatial_split_size, int64_t num_splits,
std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
bool is_rhs) {
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations, dim_numbers, activations_batch_dim,
spatial_dimensions_to_split, is_backprop, is_rhs));
activations = retval.instr;
std::vector<int64_t> transpose_dims = retval.transpose_dims;
TF_ASSIGN_OR_RETURN(
auto new_activations,
PadAndSplitSpace(activations, *spatial_dimensions_to_split,
activations_batch_dim, high_padding, low_padding,
spatial_split_size, num_splits));
return std::make_pair(new_activations, transpose_dims);
}
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PropagateOnConstant(
HloInstruction* consumer, HloInstruction* producer) {
CHECK(old_to_new_instrs_.contains(producer));
HloInstruction* new_producer = old_to_new_instrs_[producer];
auto prod_transpose_dims = instr_to_dim_permute_map_[new_producer];
std::vector<int64_t> reversed_transpose_dims(prod_transpose_dims.size());
for (int64_t i = 0; i < prod_transpose_dims.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(prod_transpose_dims, i);
}
TF_ASSIGN_OR_RETURN(consumer,
MakeTransposeHlo(consumer, reversed_transpose_dims));
auto retval = GetSpatialDimsToSplit(producer);
std::vector<int64_t> old_spatial_dims = retval.first;
std::vector<int64_t> new_spatial_dims = retval.second;
auto dim_map = instr_to_dim_map_[producer];
const int64_t old_batch_dim = dim_map[DimMapper(SpaceToBatchDimMap::kBatch)];
const int64_t old_space_dim = old_spatial_dims[0];
const int64_t new_batch_dim = DimLookUp(prod_transpose_dims, old_batch_dim);
const int64_t new_space_dim = new_spatial_dims[0];
const int64_t old_batch_size = producer->shape().dimensions(old_batch_dim);
const int64_t new_batch_size = old_batch_size * ctrl_.number_of_splits;
const int64_t high_padding =
(new_batch_size * new_producer->shape().dimensions(new_space_dim) -
old_batch_size * producer->shape().dimensions(old_space_dim)) /
old_batch_size;
auto new_consumer = PadAndSplitSpace(
consumer, new_spatial_dims, new_batch_dim, high_padding,
0, new_producer->shape().dimensions(new_space_dim),
ctrl_.number_of_splits);
return new_consumer;
}
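// Rewrites a backprop-filter convolution. Whichever side (activations or
// kernel) has not yet been space-to-batched is split locally; both sides are
// masked to their valid portions; shifted copies of the activations are then
// concatenated along a brand-new spatial dimension so the split behaves like
// a space-to-depth convolution over the filter gradient.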
absl::Status ConvolutionVisitor::PropagateOnBackpropFilterConv(
HloInstruction* convolution) {
auto activations_old = convolution->mutable_operand(0);
const int64_t rhs_dilation =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
auto original_conv_dims = convolution->convolution_dimension_numbers();
std::vector<int64_t> old_split_spatial_dims(
ctrl_.dimension_from_end_to_convert),
old_split_kernel_spatial_dims(ctrl_.dimension_from_end_to_convert);
for (int i = 0; i < ctrl_.dimension_from_end_to_convert; ++i) {
old_split_spatial_dims[i] = original_conv_dims.input_spatial_dimensions(
GetFirstChosenSpatialDim(convolution) + i);
old_split_kernel_spatial_dims[i] =
original_conv_dims.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution) + i);
}
auto kernel_old = convolution->mutable_operand(1);
const int64_t old_kernel_split_dim_size =
kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);
int64_t old_split_dim_size =
activations_old->shape().dimensions(old_split_spatial_dims[0]);
int64_t old_batch_dim = original_conv_dims.input_feature_dimension();
int64_t kernel_old_batch_dim =
original_conv_dims.kernel_input_feature_dimension();
const int64_t old_batch_size =
activations_old->shape().dimensions(old_batch_dim);
CHECK(old_to_new_instrs_.contains(kernel_old) ||
old_to_new_instrs_.contains(activations_old));
HloInstruction* activations_new = nullptr;
HloInstruction* kernel_new = nullptr;
bool activations_locally_space_to_batched = false;
bool kernel_locally_space_to_batched = false;
std::vector<int64_t> permute_dims_kernel, permute_dims;
if (old_to_new_instrs_.contains(activations_old)) {
activations_new = old_to_new_instrs_[activations_old];
permute_dims = instr_to_dim_permute_map_[activations_new];
}
if (old_to_new_instrs_.contains(kernel_old)) {
kernel_new = old_to_new_instrs_[kernel_old];
permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];
}
if (!old_to_new_instrs_.contains(activations_old)) {
kernel_new = old_to_new_instrs_[kernel_old];
permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];
VLOG(1) << "Space-to-batching activations to enable space-to-depth";
const int64_t new_kernel_space_dim =
DimLookUp(permute_dims_kernel, old_split_kernel_spatial_dims[0]);
const int64_t new_kernel_split_dim_size =
kernel_new->shape().dimensions(new_kernel_space_dim);
const int64_t needed_spatial_size =
rhs_dilation * new_kernel_split_dim_size;
const int64_t pad_size =
needed_spatial_size * ctrl_.number_of_splits - old_split_dim_size;
ConvolutionDimensionNumbers tmp_dim_numbers;
tmp_dim_numbers = original_conv_dims;
TF_ASSIGN_OR_RETURN(
auto retval, SplitSpace(activations_old, tmp_dim_numbers, old_batch_dim,
pad_size, 0,
needed_spatial_size, ctrl_.number_of_splits,
&old_split_spatial_dims,
true));
activations_new = retval.first;
std::vector<int64_t> reversed_transpose_dims(retval.second.size());
for (int64_t i = 0; i < retval.second.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
}
permute_dims = reversed_transpose_dims;
VLOG(3) << "New Activations " << retval.first->ToString();
activations_locally_space_to_batched = true;
} else if (!old_to_new_instrs_.contains(kernel_old)) {
activations_new = old_to_new_instrs_[activations_old];
permute_dims = instr_to_dim_permute_map_[activations_new];
VLOG(1) << "Space-to-batching kernel to enable space-to-depth";
const int64_t new_space_dim =
DimLookUp(permute_dims, old_split_spatial_dims[0]);
const int64_t new_split_dim_size =
activations_new->shape().dimensions(new_space_dim);
const int64_t needed_spatial_size =
CeilOfRatio(new_split_dim_size, rhs_dilation);
int64_t old_kernel_split_dim_size =
kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);
const int64_t pad_size = needed_spatial_size * ctrl_.number_of_splits -
old_kernel_split_dim_size;
ConvolutionDimensionNumbers tmp_dim_numbers;
tmp_dim_numbers = original_conv_dims;
TF_ASSIGN_OR_RETURN(
auto retval,
SplitSpace(kernel_old, tmp_dim_numbers, kernel_old_batch_dim,
pad_size, 0,
needed_spatial_size, ctrl_.number_of_splits,
&old_split_kernel_spatial_dims,
true, true));
kernel_new = retval.first;
std::vector<int64_t> reversed_transpose_dims(retval.second.size());
for (int64_t i = 0; i < retval.second.size(); ++i) {
reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
}
permute_dims_kernel = reversed_transpose_dims;
VLOG(3) << "New kernel " << retval.first->ToString();
kernel_locally_space_to_batched = true;
}
CHECK_NE(activations_new, nullptr);
CHECK_NE(kernel_new, nullptr);
const int64_t new_spatial_dimension =
activations_new->shape().dimensions_size();
auto permuted_conv_dims_numbers = original_conv_dims;
int64_t activations_batch_dim =
DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
int64_t activations_feature_dim =
DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());
const int64_t previous_spatial_dim_count =
original_conv_dims.input_spatial_dimensions_size();
for (int64_t i = 0; i < previous_spatial_dim_count; ++i) {
permuted_conv_dims_numbers.set_input_spatial_dimensions(
i, DimLookUp(permute_dims,
original_conv_dims.input_spatial_dimensions(i)));
permuted_conv_dims_numbers.set_kernel_spatial_dimensions(
i, DimLookUp(permute_dims_kernel,
original_conv_dims.kernel_spatial_dimensions(i)));
}
permuted_conv_dims_numbers.add_input_spatial_dimensions(
new_spatial_dimension);
permuted_conv_dims_numbers.add_kernel_spatial_dimensions(
new_spatial_dimension);
permuted_conv_dims_numbers.add_output_spatial_dimensions(
new_spatial_dimension);
const int64_t previous_chosen_spatial_dim_in_output =
permuted_conv_dims_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
permuted_conv_dims_numbers.set_output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution), new_spatial_dimension);
permuted_conv_dims_numbers.set_output_spatial_dimensions(
previous_spatial_dim_count, previous_chosen_spatial_dim_in_output);
const int64_t kernel_input_feature_dim = DimLookUp(
permute_dims_kernel, original_conv_dims.kernel_input_feature_dimension());
const int64_t kernel_output_feature_dim =
DimLookUp(permute_dims_kernel,
original_conv_dims.kernel_output_feature_dimension());
permuted_conv_dims_numbers.set_kernel_input_feature_dimension(
kernel_input_feature_dim);
permuted_conv_dims_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dim);
std::vector<int64_t> spatial_dimensions_to_split(
ctrl_.count_of_dimensions_to_convert);
const int64_t first_dim_to_split = GetFirstChosenSpatialDim(convolution);
for (int64_t i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
spatial_dimensions_to_split[i] =
permuted_conv_dims_numbers.input_spatial_dimensions(first_dim_to_split +
i);
}
const int64_t kernel_spatial_dimension_to_split =
permuted_conv_dims_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
int64_t new_split_dim_size =
activations_new->shape().dimensions(spatial_dimensions_to_split[0]);
const int64_t kernel_new_split_dim_size =
kernel_new->shape().dimensions(kernel_spatial_dimension_to_split);
permuted_conv_dims_numbers.set_input_batch_dimension(activations_feature_dim);
permuted_conv_dims_numbers.set_input_feature_dimension(activations_batch_dim);
VLOG(1) << "Propagating on conv activations_batch_dim "
<< activations_batch_dim << " spatial_dimension_to_split "
<< spatial_dimensions_to_split[0] << " old_batch_size "
<< old_batch_size << " new_split_dim_size " << new_split_dim_size;
TF_ASSIGN_OR_RETURN(
auto retval,
BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
activations_batch_dim, &spatial_dimensions_to_split,
true));
int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];
std::vector<int64_t> transpose_dims = retval.transpose_dims;
CHECK(!transpose_dims.empty());
activations_new = retval.instr;
VLOG(1) << "Activations_new post BringSpaceNextToBatch "
<< activations_new->ToString();
VLOG(1) << "activations_batch_dim " << activations_batch_dim
<< " activations_feature_dim " << activations_feature_dim;
const int64_t expected_split_dim_size =
rhs_dilation * kernel_new_split_dim_size;
if (new_split_dim_size != expected_split_dim_size) {
CHECK_LT(new_split_dim_size, expected_split_dim_size);
new_split_dim_size = expected_split_dim_size;
TF_ASSIGN_OR_RETURN(
activations_new,
ChangeSpatialSizeOnSpaceToBatchedShape(
activations_new, activations_batch_dim, old_batch_size,
spatial_dimensions_to_split, new_split_dim_size, true));
}
spatial_dimension_to_split = spatial_dimensions_to_split[0];
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(activations_new->shape().element_type())),
&activations_new->metadata(), &activations_new->frontend_attributes());
if (!activations_locally_space_to_batched) {
TF_ASSIGN_OR_RETURN(
activations_new,
SelectValidPortion(activations_new, activations_old, select_val,
activations_batch_dim, spatial_dimensions_to_split,
old_batch_dim, old_split_spatial_dims));
}
if (!kernel_locally_space_to_batched) {
VLOG(3) << "Selecting the valid kernel area";
std::vector<int64_t> new_kernel_split_spatial_dims(
ctrl_.dimension_from_end_to_convert);
new_kernel_split_spatial_dims[0] = kernel_spatial_dimension_to_split;
TF_ASSIGN_OR_RETURN(
kernel_new,
SelectValidPortion(kernel_new, kernel_old, select_val,
kernel_input_feature_dim,
new_kernel_split_spatial_dims,
original_conv_dims.kernel_input_feature_dimension(),
old_split_kernel_spatial_dims));
}
auto new_dim_numbers = permuted_conv_dims_numbers;
VLOG(2) << "New dim numbers " << new_dim_numbers.DebugString();
const int64_t inherent_low_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
const int64_t inherent_high_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_high();
std::vector<HloInstruction*> activations_chunks;
for (int64_t i = 0; i < inherent_low_padding; ++i) {
HloInstruction* activations_to_use = nullptr;
if (i == 0) {
activations_to_use = activations_new;
} else {
activations_to_use = activations_chunks.back();
}
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_slice,
HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
activations_batch_dim, 1,
0));
activations_chunks.push_back(activations_slice);
}
absl::c_reverse(activations_chunks);
const int64_t expanded_kernel =
old_kernel_split_dim_size * rhs_dilation - (rhs_dilation - 1);
const int64_t overlap_count =
old_split_dim_size - expanded_kernel + 1 +
(inherent_low_padding < 0 ? inherent_low_padding : 0) +
(inherent_high_padding < 0 ? inherent_high_padding : 0);
VLOG(1) << "overlap_count " << overlap_count << " inherent_low_padding "
<< inherent_low_padding << " inherent_high_padding "
<< inherent_high_padding;
const int64_t total_overlap_count =
overlap_count + (inherent_low_padding > 0 ? inherent_low_padding : 0) +
(inherent_high_padding > 0 ? inherent_high_padding : 0);
for (int64_t i = 0; i < overlap_count; ++i) {
HloInstruction* activations_to_use = nullptr;
HloInstruction* activations_slice = nullptr;
if (i == 0) {
activations_to_use = activations_new;
if (inherent_low_padding < 0) {
TF_ASSIGN_OR_RETURN(
activations_slice,
HaloDuplicateWithSlice(
activations_to_use, spatial_dimensions_to_split,
activations_batch_dim,
inherent_low_padding, 0));
} else {
activations_slice = activations_to_use;
}
} else {
activations_to_use = activations_chunks.back();
TF_ASSIGN_OR_RETURN(activations_slice,
HaloDuplicateWithSlice(
activations_to_use, spatial_dimensions_to_split,
activations_batch_dim, -1,
0));
}
activations_chunks.push_back(activations_slice);
}
int64_t high_padding_to_materialize = 0;
if (inherent_high_padding > 0) {
high_padding_to_materialize =
std::max(total_overlap_count -
(std::max(overlap_count, static_cast<int64_t>(0)) +
std::max(inherent_low_padding, static_cast<int64_t>(0))),
static_cast<int64_t>(0));
}
for (int64_t i = 0; i < high_padding_to_materialize; ++i) {
HloInstruction* activations_to_use = nullptr;
activations_to_use = activations_chunks.back();
TF_ASSIGN_OR_RETURN(
HloInstruction * activations_slice,
HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
activations_batch_dim,
-1, 0));
activations_chunks.push_back(activations_slice);
}
for (int64_t i = 0; i < activations_chunks.size(); ++i) {
std::vector<int64_t> input_sizes(
activations_chunks[i]->shape().dimensions().begin(),
activations_chunks[i]->shape().dimensions().end());
input_sizes.push_back(1);
TF_ASSIGN_OR_RETURN(activations_chunks[i],
MakeReshapeHlo(input_sizes, activations_chunks[i]));
VLOG(1) << "new_spatial_dimension " << new_spatial_dimension << " slice "
<< activations_chunks[i]->ToString();
}
TF_ASSIGN_OR_RETURN(
activations_new,
MakeConcatHlo(absl::MakeSpan(activations_chunks), new_spatial_dimension,
&activations_old->metadata(),
&activations_old->frontend_attributes()));
std::vector<int64_t> kernel_sizes(kernel_new->shape().dimensions().begin(),
kernel_new->shape().dimensions().end());
kernel_sizes.push_back(1);
TF_ASSIGN_OR_RETURN(kernel_new, MakeReshapeHlo(kernel_sizes, kernel_new));
auto new_window = convolution->window();
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_padding_high(-(rhs_dilation - 1));
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_padding_low(0);
new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
->set_size(CeilOfRatio(new_split_dim_size, rhs_dilation));
auto window_dim = new_window.add_dimensions();
window_dim->set_base_dilation(1);
window_dim->set_size(1);
int64_t stride = 1;
if (inherent_low_padding > total_overlap_count) {
stride = activations_chunks.size();
}
window_dim->set_stride(stride);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations_new, kernel_new, convolution->feature_group_count(),
convolution->batch_group_count(), new_window, new_dim_numbers,
convolution->precision_config(),
convolution->shape().element_type()));
convolution->SetupDerivedInstruction(new_conv);
VLOG(2) << "New backprop filter convolution " << new_conv->ToString();
std::vector<int64_t> output_sizes(new_conv->shape().dimensions().begin(),
new_conv->shape().dimensions().end());
output_sizes.erase(output_sizes.begin() +
new_dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution)));
TF_ASSIGN_OR_RETURN(new_conv, MakeReshapeHlo(output_sizes, new_conv));
old_to_new_instrs_[convolution] = new_conv;
VLOG(1) << "Space-to-featured convolution " << new_conv->ToString();
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
original_conv_dims.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
original_conv_dims.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
original_conv_dims.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[convolution] = dim_map;
std::vector<int64_t> trans_dims(convolution->shape().dimensions_size());
absl::c_iota(trans_dims, 0);
instr_to_dim_permute_map_[new_conv] = trans_dims;
return absl::OkStatus();
}
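// Searches users (up to the given depth) for a reduce-window or
// select-and-scatter fed by this instruction; convolutions, pads, transposes
// and dots cut the search off along that branch.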
HloInstruction*
ConvolutionVisitor::DoesConvolutionFeedReduceWindowOrSelectAndScatter(
HloInstruction* instr, int64_t depth = kReduceWindowSearchDepth) {
if (depth == 0) {
return nullptr;
}
for (auto user : instr->users()) {
if (user->opcode() == HloOpcode::kReduceWindow ||
user->opcode() == HloOpcode::kSelectAndScatter) {
return user;
}
if (user->opcode() == HloOpcode::kConvolution ||
user->opcode() == HloOpcode::kPad ||
user->opcode() == HloOpcode::kTranspose ||
user->opcode() == HloOpcode::kDot) {
continue;
}
auto ret =
DoesConvolutionFeedReduceWindowOrSelectAndScatter(user, depth - 1);
if (ret != nullptr) {
return ret;
}
}
return nullptr;
}
bool ConvolutionVisitor::DoesConvolutionFeedUnpropagatableOp(
HloInstruction* instr, int64_t depth) {
auto key = std::make_pair(instr, depth);
if (unpropagatability_cache_.contains(key)) {
return unpropagatability_cache_[key];
}
if (depth == 0 || instr->user_count() == 0) {
unpropagatability_cache_[key] = false;
return false;
}
for (auto user : instr->users()) {
if (IsOpcodeNonPropagatable(user)) {
unpropagatability_cache_[key] = true;
return true;
}
int64_t depth_to_use = depth;
if (user->opcode() == HloOpcode::kConvolution ||
user->opcode() == HloOpcode::kDot) {
depth_to_use--;
}
if (DoesConvolutionFeedUnpropagatableOp(user, depth_to_use)) {
unpropagatability_cache_[key] = true;
return true;
}
}
unpropagatability_cache_[key] = false;
return false;
}
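// Returns true if the space-to-batched operand of the given reduce-window or
// select-and-scatter op has a new spatial dimension at least as large as the
// op's window size along the old spatial dimension.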
bool ConvolutionVisitor::IsSpaceToBatchedSpaceSizeSuitable(
HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kSelectAndScatter ||
instr->opcode() == HloOpcode::kReduceWindow);
auto old_producer = instr->mutable_operand(0);
auto dim_map_val_op = instr_to_dim_map_[old_producer];
const int64_t old_space_dim =
dim_map_val_op[DimMapper(SpaceToBatchDimMap::kSpace0)];
auto first_operand = old_to_new_instrs_[old_producer];
auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
const int64_t new_space_dim =
DimLookUp(permute_dims_first_operand, old_space_dim);
const int64_t window_size = instr->window().dimensions(old_space_dim).size();
if (first_operand->shape().dimensions(new_space_dim) < window_size) {
return false;
}
return true;
}
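// Gathers the spatial-dimension parameters of `convolution` (inherent
// padding, stride, dilation, effective kernel size, and the extra padding
// needed by the space-to-batched convolution) into a ConvDetails struct.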
ConvolutionVisitor::ConvDetails ConvolutionVisitor::GetConvolutionDetails(
HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
auto activations = convolution->mutable_operand(0);
auto kernel = convolution->mutable_operand(1);
const auto& kernel_shape = kernel->shape();
const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
int64_t kernel_spatial_dim_size = kernel_shape.dimensions(kernel_spatial_dim);
if (IsForwardWindowDilatedConv(convolution, dim_numbers)) {
const int64_t window_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.window_dilation();
kernel_spatial_dim_size =
(kernel_spatial_dim_size - 1) * (window_dilation_factor - 1) +
kernel_spatial_dim_size;
}
std::vector<int64_t> spatial_dimensions_to_split =
GetChosenSpatialDims(convolution);
const int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];
const int64_t input_dim_size =
activations->shape().dimensions(spatial_dimension_to_split);
const int64_t inherent_low_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_low();
const int64_t inherent_high_padding =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.padding_high();
const int64_t stride = convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.stride();
const int64_t base_dilation_factor =
convolution->window()
.dimensions(GetFirstChosenSpatialDim(convolution))
.base_dilation();
bool is_base_dilated = base_dilation_factor > 1;
const int64_t spatial_size = input_dim_size +
(is_base_dilated ? 0 : inherent_low_padding) +
inherent_high_padding;
const int64_t last_overlap = base_dilation_factor == inherent_low_padding
? kernel_spatial_dim_size
: kernel_spatial_dim_size - 1;
const int64_t halo_size = is_base_dilated
? last_overlap / base_dilation_factor
: kernel_spatial_dim_size - 1;
const int64_t high_padding_for_base_dilation =
inherent_low_padding == 0 ? base_dilation_factor - 1
: last_overlap % base_dilation_factor;
const int64_t high_padding_for_conv =
is_base_dilated ? high_padding_for_base_dilation : 0;
const int64_t low_padding_for_conv =
is_base_dilated && (base_dilation_factor != inherent_low_padding)
? inherent_low_padding
: 0;
return ConvDetails{spatial_dimensions_to_split,
inherent_low_padding,
inherent_high_padding,
stride,
spatial_size,
base_dilation_factor,
halo_size,
high_padding_for_conv,
low_padding_for_conv,
kernel_spatial_dim_size,
input_dim_size};
}
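// Rewrites `convolution` in space-to-batch form: splits the chosen spatial
// dimension into ctrl_.number_of_splits chunks folded into the batch
// dimension, duplicates halo regions between chunks, builds the new
// convolution, masks out the invalid output portion, records the old->new
// mapping, and then propagates the transformation to users.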
absl::Status ConvolutionVisitor::PerformSpaceToBatchOnConvolution(
HloInstruction* convolution) {
if (!ConsumeFuel("space-to-batch-converter", [&] {
return "Skipping space-to-batch propagation because fuel over\n";
})) {
return absl::OkStatus();
}
VLOG(1) << "Handling conv " << convolution->ToString();
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
auto activations = convolution->mutable_operand(0);
VLOG(1) << "spatial size " << c.spatial_size;
if (c.spatial_size < 2 * ctrl_.number_of_splits) {
return absl::OkStatus();
}
auto original_conv = convolution;
const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
const int64_t output_offsets =
convolution->shape().dimensions(output_spatial_dim);
const int64_t output_offsets_per_split =
CeilOfRatio(output_offsets, ctrl_.number_of_splits);
int64_t spatial_split_size =
CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;
while (spatial_split_size * ctrl_.number_of_splits - c.spatial_size < 0) {
spatial_split_size += c.stride;
}
auto reduce_window_or_select_and_scatter =
DoesConvolutionFeedReduceWindowOrSelectAndScatter(convolution);
if (reduce_window_or_select_and_scatter != nullptr &&
reduce_window_or_select_and_scatter->shape().IsArray() &&
reduce_window_or_select_and_scatter->shape().rank() ==
convolution->shape().rank()) {
    VLOG(2) << "DoesConvolutionFeedReduceWindowOrSelectAndScatter found a "
               "reduce-window/select-and-scatter user";
const int64_t win_stride =
std::max(reduce_window_or_select_and_scatter->window()
.dimensions(output_spatial_dim)
.stride(),
static_cast<int64_t>(1));
CHECK_NE(win_stride, 0)
<< "Bad op " << reduce_window_or_select_and_scatter->ToString();
CHECK_NE(c.stride, 0) << "Bad op " << convolution->ToString();
while ((spatial_split_size / c.stride) % win_stride != 0) {
spatial_split_size += c.stride;
}
}
const int64_t slice_size = spatial_split_size + c.halo_size;
const int64_t low_pad_to_handle_base_dilation =
(c.base_dilation_factor > 1 &&
c.base_dilation_factor == c.inherent_low_padding)
? 1
: 0;
int64_t pad_size =
spatial_split_size * ctrl_.number_of_splits - c.spatial_size;
bool handle_low_pad_in_first_reshape = false;
if (pad_size > low_pad_to_handle_base_dilation) {
pad_size -= low_pad_to_handle_base_dilation;
handle_low_pad_in_first_reshape = true;
}
VLOG(1) << "spatial_split_size " << spatial_split_size << " stride "
<< c.stride << " slice_size " << slice_size;
VLOG(1) << "spatial_dimension_to_split " << c.spatial_dimensions_to_split[0]
<< " num_splits " << ctrl_.number_of_splits
<< " kernel_spatial_dim_size " << c.kernel_spatial_dim_size;
std::vector<int64_t> spatial_dimensions_to_split =
c.spatial_dimensions_to_split;
TF_ASSIGN_OR_RETURN(
auto retval,
SplitSpace(
activations, dim_numbers, activations_batch_dim,
c.inherent_high_padding + pad_size,
c.base_dilation_factor == 1 ? c.inherent_low_padding
: handle_low_pad_in_first_reshape ? low_pad_to_handle_base_dilation
: 0,
spatial_split_size, ctrl_.number_of_splits,
&spatial_dimensions_to_split));
HloInstruction* batch_increased_reshape = retval.first;
convolution->SetupDerivedInstruction(batch_increased_reshape);
VLOG(1) << "First reshape done " << batch_increased_reshape->ToString();
TF_ASSIGN_OR_RETURN(
activations,
HaloDuplicateWithSlice(
batch_increased_reshape, spatial_dimensions_to_split,
activations_batch_dim,
handle_low_pad_in_first_reshape ? 0 : low_pad_to_handle_base_dilation,
c.halo_size));
VLOG(1) << "Batch merge done " << activations->ToString();
auto new_dim_numbers = dim_numbers;
const int64_t rank = convolution->shape().rank();
std::vector<int64_t> transpose_dims(rank);
int dim_count = 0;
std::map<int64_t, int64_t> dim_translator;
for (int j = 0; j < dim_numbers.output_spatial_dimensions_size(); ++j) {
if (j == GetFirstChosenSpatialDim(convolution)) {
dim_translator[dim_numbers.output_batch_dimension()] = dim_count;
new_dim_numbers.set_output_batch_dimension(dim_count++);
}
dim_translator[dim_numbers.output_spatial_dimensions(j)] = dim_count;
new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
dim_count++;
}
dim_translator[dim_numbers.output_feature_dimension()] = dim_count;
new_dim_numbers.set_output_feature_dimension(dim_count);
int p = 0;
for (const auto& entry : dim_translator) {
transpose_dims[p] = entry.second;
p++;
}
VLOG(1) << "New dim numbers " << new_dim_numbers.DebugString()
<< " batch dim " << new_dim_numbers.input_batch_dimension();
auto new_window = convolution->window();
const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
new_window.mutable_dimensions(first_dim + i)
->set_padding_high(c.high_padding_for_conv);
new_window.mutable_dimensions(first_dim + i)
->set_padding_low(c.low_padding_for_conv);
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_conv,
MakeConvolveHlo(
activations, convolution->mutable_operand(1),
convolution->feature_group_count(), convolution->batch_group_count(),
new_window, new_dim_numbers, convolution->precision_config(),
convolution->shape().element_type(),
&convolution->metadata(), &convolution->frontend_attributes()));
convolution->SetupDerivedInstruction(new_conv);
batch_to_space_map_[convolution->mutable_operand(0)] =
convolution->mutable_operand(0);
VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();
std::vector<int64_t> new_output_split_spatial_dims(
ctrl_.count_of_dimensions_to_convert),
old_output_split_spatial_dims(ctrl_.count_of_dimensions_to_convert);
for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
old_output_split_spatial_dims[i] =
dim_numbers.output_spatial_dimensions(first_dim + i);
new_output_split_spatial_dims[i] =
new_dim_numbers.output_spatial_dimensions(first_dim + i);
}
const int64_t output_batch_dim = new_dim_numbers.output_batch_dimension();
auto select_val = computation_->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::Zero(new_conv->shape().element_type())),
&convolution->metadata(), &convolution->frontend_attributes());
TF_ASSIGN_OR_RETURN(
new_conv,
SelectValidPortion(new_conv, original_conv, select_val, output_batch_dim,
new_output_split_spatial_dims,
dim_numbers.output_batch_dimension(),
old_output_split_spatial_dims));
old_to_new_instrs_[original_conv] = new_conv;
std::vector<int64_t> dim_map(kNumMappedDims);
dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
dim_numbers.output_batch_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
dim_numbers.output_feature_dimension();
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
dim_numbers.output_spatial_dimensions(
GetFirstChosenSpatialDim(convolution));
instr_to_dim_map_[original_conv] = dim_map;
instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
if (non_propagatable_instrs_.count(convolution) > 0) {
non_propagatable_instrs_.erase(convolution);
}
TF_CHECK_OK(PropagateOnUsers(original_conv));
return absl::OkStatus();
}
}
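// Runs a ConvolutionVisitor over every non-fusion computation in the module;
// returns true if any computation was changed.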
absl::StatusOr<bool> SpaceToBatchConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "SpaceToBatchConverter::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
ConvolutionVisitor visitor(ctrl_, comp);
if (visitor.Run().value()) {
changed = true;
}
VLOG(1) << "Done operating on computation";
}
XLA_VLOG_LINES(2,
"SpaceToBatchConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/space_to_batch_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using SpaceToBatchConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(SpaceToBatchConverterTest, SimpleBatch1) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 1);
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1ConvXpose) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0), op::Select());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch1WithReduceWindow) {
std::string hlo_string = R"(
HloModule module
adder (lhs: bf16[], rhs: bf16[]) -> bf16[] {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%constant = bf16[3] constant({1.0, 2.0, 3.0})
%tuple = (bf16[1,256,256,32], bf16[3])tuple(%convolution, %constant)
ROOT %gte = bf16[1,256,256,32] get-tuple-element(%tuple), index=0
%gte2 = bf16[3]get-tuple-element(%tuple), index=1
%init = bf16[] constant(1.0)
%reduce-window = bf16[3] reduce-window(bf16[3] %gte2, bf16[] %init),
window={size=1}, to_apply=%adder
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, SimpleBatch2) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[2,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, UnpropagatableOp) {
std::string hlo_string = R"(
HloModule module
ENTRY comp {
%reduce-window = bf16[1,76,76,64]{3,2,1,0} parameter(0)
%convert.13 = bf16[3,3,64,64]{3,2,1,0} parameter(1)
%convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution(
%reduce-window, %convert.13), window={size=3x3 pad=1_1x1_1},
dim_labels=b01f_01io->f01b
ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1),
custom_call_target="ResizeNearest"
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 1});
ASSERT_FALSE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, Batch1WithStrideAndPad) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,224,224,3]{3,2,1,0} parameter(0)
%p1 = bf16[7,7,3,64]{3,2,1,0} parameter(1)
ROOT %convolution.3 = bf16[1,112,112,64]{3,2,1,0} convolution(%p0, %p1),
window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 4});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, Batch1WithBaseDilation) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p2 = bf16[1,28,28,128]{3,0,2,1} parameter(0)
%p3 = bf16[1,1,512,128]{3,2,1,0} parameter(1)
ROOT %c = bf16[1,56,56,512]{3,0,2,1} convolution(%p2, %p3),
window={size=1x1 pad=0_1x0_1 lhs_dilate=2x2 rhs_reversal=1x1},
dim_labels=b01f_01oi->b01f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
auto reshape = root->operand(0)->operand(0);
EXPECT_THAT(reshape, op::Reshape());
auto previous_reshape = reshape->operand(0);
EXPECT_THAT(previous_reshape, op::Reshape());
EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
const int64_t batch_dim = previous_reshape->operand(0)
->operand(1)
->convolution_dimension_numbers()
.output_batch_dimension();
EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}
TEST_F(SpaceToBatchConverterTest, PropagateThroughDot) {
std::string hlo_string = R"(
HloModule module
ENTRY computation {
%p0 = bf16[1,258,258,32] parameter(0)
%p1 = bf16[3,3,32,32] parameter(1)
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
dim_labels=b01f_01io->b01f
%p2 = bf16[32,32] parameter(2)
ROOT %dot.5010 = bf16[1,256,256,32] dot(%convolution, %p2),
lhs_contracting_dims={3},
rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
}
TEST_F(SpaceToBatchConverterTest, PropagateOnTrivialReduce) {
std::string hlo_string = R"(
HloModule module
%region_1.37 (Arg_0.38: f32[], Arg_1.39: f32[]) -> f32[] {
%Arg_0.38 = f32[] parameter(0)
%Arg_1.39 = f32[] parameter(1)
ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
}
ENTRY computation {
%p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
%p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
%c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
%constant.5 = f32[] constant(0)
ROOT %reduce.41 = f32[7,160,400]{2,1,0} reduce(%c, %constant.5), dimensions={3}, to_apply=%region_1.37
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0)->operand(0)->operand(0)->operand(0),
op::Reduce());
auto new_reduce = root->operand(0)->operand(0)->operand(0)->operand(0);
EXPECT_EQ(new_reduce->shape().dimensions(1),
7 * 8);
}
TEST_F(SpaceToBatchConverterTest, DoNotPropagateOnTupleReduce) {
std::string hlo_string = R"(
HloModule module
%minmax_func.2717 {
%lhs_value.2718 = f32[] parameter(0)
%rhs_value.2720 = f32[] parameter(2)
%compare.2722 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=GE
%select.2723 = f32[] select(pred[] %compare.2722, f32[] %lhs_value.2718, f32[] %rhs_value.2720)
%compare.2725 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=EQ
%lhs_index.2719 = f32[] parameter(1)
%rhs_index.2721 = f32[] parameter(3)
%minimum.2726 = f32[] minimum(f32[] %lhs_index.2719, f32[] %rhs_index.2721)
%select.2724 = f32[] select(pred[] %compare.2722, f32[] %lhs_index.2719, f32[] %rhs_index.2721)
%select.2727 = f32[] select(pred[] %compare.2725, f32[] %minimum.2726, f32[] %select.2724)
ROOT %tuple.4 = (f32[], f32[]) tuple(f32[] %select.2723, f32[] %select.2727)
}
ENTRY computation {
%p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
%p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
%c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
%constant.5 = f32[] constant(0)
%constant.6 = f32[] constant(1)
ROOT %reduce.36 = (f32[7,160,400]{2,1,0}, f32[7,160,400]{2,1,0}) reduce(%c, %c,
%constant.5, %constant.6), dimensions={3}, to_apply=%minmax_func.2717
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Reduce());
}
TEST_F(SpaceToBatchConverterTest, ReduceDegenerateDim) {
std::string hlo_string = R"(
HloModule module
%region_42.4982 {
%Arg_0.38 = f32[] parameter(0)
%Arg_1.39 = f32[] parameter(1)
ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
}
ENTRY computation {
%p0 = f32[2,1,84,84,3]{4,3,2,1,0} parameter(0)
%p1 = f32[3,3,3,3,32]{4,3,2,1,0} parameter(1)
%constant.10559 = f32[] constant(0)
%convolution.98 = f32[2,1,84,84,32]{4,3,2,1,0} convolution(%p0, %p1),
window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
ROOT %reduce.2606 = f32[2,84,84]{2,1,0} reduce(f32[2,1,84,84,32]{4,3,2,1,0}
%convolution.98, f32[] %constant.10559), dimensions={1,4}, to_apply=%region_42.4982
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Transpose());
EXPECT_THAT(root->operand(0), op::Slice());
}
TEST_F(SpaceToBatchConverterTest, PropagateOnReduce) {
std::string hlo_string = R"(
HloModule xla_computation_unknown.14
region_0.134 {
Arg_0.135 = f32[] parameter(0)
Arg_1.136 = f32[] parameter(1)
ROOT add.137 = f32[] add(Arg_0.135, Arg_1.136)
}
ENTRY main.140 {
p0 = bf16[1,512,32,128]{3,2,1,0} parameter(0)
p1 = f32[3,3,128,128]{3,2,1,0} parameter(1)
%convolution.755 = f32[1,512,32,128]{3,2,1,0}
convolution(p0, p1),
window={size=3x3 pad=1_1x1_1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
%constant.19458 = f32[] constant(0)
ROOT %reduce.1354 = f32[128]{0} reduce(%convolution.755, %constant.19458),
dimensions={0,1,2}, to_apply=%region_0.134
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
SpaceToBatchConverter converter(
SpaceToBatchController{true, true, true, true, 8});
ASSERT_TRUE(converter.Run(module.get()).value());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Reduce());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5a9efe7-04e9-4757-ada9-15a588372915 | cpp | tensorflow/tensorflow | host_offload_utils | tensorflow/core/profiler/utils/host_offload_utils.cc | third_party/xla/xla/service/host_offload_utils_test.cc | #include "tensorflow/core/profiler/utils/host_offload_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
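// Returns true if `event` looks like a host-offload transfer op: its display
// name contains one of the copy/dynamic-slice start/done keywords and its
// full name mentions the host memory label.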
bool HostOffloadEventProcessor::IsHostOffloadOpName(
const XEventVisitor& event) const {
static constexpr absl::string_view keywords[] = {"copy-start",
"copy-done",
"dynamic-slice-start",
"dynamic-slice-done",
"dynamic-update-slice-start",
"dynamic-update-slice-done"};
for (const auto& keyword : keywords) {
if (absl::StrContains(event.DisplayName(), keyword) &&
absl::StrContains(event.Name(), host_memory_label_)) {
return true;
}
}
return false;
}
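// Extracts the trailing ".<id>" suffix of an HLO op name (e.g.
// "copy-start.42" -> "42"); returns "0" when the name has no such suffix.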
std::string HostOffloadEventProcessor::GetOffloadInstructionID(
absl::string_view op_name) const {
std::vector<std::string> op_name_vec = absl::StrSplit(op_name, '.');
if (op_name_vec.size() < 2) {
return "0";
}
return op_name_vec.back();
}
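// Builds a display name shared by matching start/done events by stripping the
// "-start"/"-done" suffix and prefixing "offload-", e.g. "copy-start.42" ->
// "offload-copy.42".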
std::string HostOffloadEventProcessor::GetOffloadInstructionName(
absl::string_view op_name) const {
std::string display_id = GetOffloadInstructionID(op_name);
  size_t start_pos = op_name.find("-start");
  size_t done_pos = op_name.find("-done");
  absl::string_view display_opname;
  if (start_pos != absl::string_view::npos) {
    display_opname = op_name.substr(0, start_pos);
  } else if (done_pos != absl::string_view::npos) {
    display_opname = op_name.substr(0, done_pos);
} else {
LOG(WARNING) << "Invalid op name: " << op_name;
display_opname = op_name;
}
return absl::StrCat("offload-", display_opname, ".", display_id);
}
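// Pairs each "-done" event with the earliest unmatched "-start" event of the
// same offload instruction and emits a single event spanning both onto one of
// the host-offload-op trace lines.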
void HostOffloadEventProcessor::ProcessHostOffloadOpEvent(
const XEventVisitor& event, std::optional<int64_t> group_id) {
std::string display_opname = GetOffloadInstructionName(event.DisplayName());
auto [iter, inserted] = seen_events_.try_emplace(display_opname);
std::queue<const XEventVisitor*>& events = iter->second;
if (absl::StrContains(event.DisplayName(), "-start")) {
events.push(&event);
return;
} else if (absl::StrContains(event.DisplayName(), "-done")) {
if (events.empty()) {
LOG(INFO) << "No corresponding start event found for "
<< event.DisplayName();
return;
}
const XEventVisitor* start_event = events.front();
events.pop();
tsl::profiler::Timespan event_span = tsl::profiler::Timespan::FromEndPoints(
start_event->GetTimespan().begin_ps(), event.GetTimespan().end_ps());
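    // Pick the line whose frontier ends earliest at or before this event's
    // start, so that overlapping offload events land on separate rows. Note
    // the frontier values are picoseconds despite the `_ns` field suffix.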
int line_builder_index = -1;
uint64_t minimum_end_time_frontier = event_span.begin_ps();
for (int i = 0; i < host_offload_op_line_builders_.size(); ++i) {
if (host_offload_op_line_builders_[i].event_end_time_frontier_ns <=
minimum_end_time_frontier) {
line_builder_index = i;
minimum_end_time_frontier =
host_offload_op_line_builders_[i].event_end_time_frontier_ns;
}
}
constexpr int kMaxHostOffloadOpLinesSize =
kThreadIdHostOffloadOpEnd - kThreadIdHostOffloadOpStart + 1;
if (line_builder_index == -1) {
if (host_offload_op_line_builders_.size() < kMaxHostOffloadOpLinesSize) {
XLineBuilder lb = plane_builder_->GetOrCreateLine(
kThreadIdHostOffloadOpStart +
host_offload_op_line_builders_.size());
lb.SetName(absl::StrFormat("%s row %d", kHostOffloadOpLineName,
host_offload_op_line_builders_.size()));
lb.SetTimestampNs(start_timestamp_ns_);
host_offload_op_line_builders_.push_back(
{std::move(lb), event_span.end_ps()});
}
line_builder_index = host_offload_op_line_builders_.size() - 1;
}
host_offload_op_line_builders_[line_builder_index]
.event_end_time_frontier_ns =
std::max(host_offload_op_line_builders_[line_builder_index]
.event_end_time_frontier_ns,
event_span.end_ps());
XEventMetadata* host_offload_copy_metadata =
plane_builder_->CreateEventMetadata();
host_offload_copy_metadata->set_display_name(display_opname);
XEventBuilder event_builder =
host_offload_op_line_builders_[line_builder_index]
.line_builder.AddEvent(*host_offload_copy_metadata);
event_builder.SetTimespan(event_span);
const XStatMetadata& async_stat = *plane_builder_->GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kIsAsync));
event_builder.AddStatValue(async_stat, 1);
}
}
}
} | #include "xla/service/host_offload_utils.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace host_offload_utils {
namespace {
class HostOffloadUtilsTest : public HloTestBase {};
TEST_F(HostOffloadUtilsTest, SimpleGetSuccessorsGetPredecessorsTest) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* data_param = FindInstruction(module.get(), "data_param");
ASSERT_NE(data_param, nullptr);
HloInstruction* offload_custom_call =
FindInstruction(module.get(), "offload_custom_call");
ASSERT_NE(offload_custom_call, nullptr);
TF_ASSERT_OK_AND_ASSIGN(
std::vector<InstructionAndShapeIndex> succ,
GetSuccessors(InstructionAndShapeIndex(data_param, {})));
std::vector<InstructionAndShapeIndex> expected_succ = {
InstructionAndShapeIndex(offload_custom_call, {})};
EXPECT_EQ(succ, expected_succ);
std::vector<InstructionAndShapeIndex> pred =
GetPredecessors(InstructionAndShapeIndex(offload_custom_call, {}));
std::vector<InstructionAndShapeIndex> expected_pred = {
InstructionAndShapeIndex(data_param, {})};
EXPECT_EQ(pred, expected_pred);
}
TEST_F(HostOffloadUtilsTest, ComputationGetSuccessorsGetPredecessorsTest) {
const std::string& hlo_string = R"(
HloModule my_module
other_computation {
param_0 = f32[2048] parameter(0)
param_1 = f32[2048] parameter(1)
ROOT tuple = (f32[2048], f32[2048]) tuple(param_0, param_1)
}
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
call = (f32[2048], f32[2048]) call(offload_custom_call, other_param), to_apply=other_computation
gte_0 = f32[2048] get-tuple-element(call), index=0
gte_1 = f32[2048] get-tuple-element(call), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* call = FindInstruction(module.get(), "call");
ASSERT_NE(call, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::vector<InstructionAndShapeIndex> succ,
GetSuccessors(InstructionAndShapeIndex(call, {0})));
std::vector<InstructionAndShapeIndex> expected_succ = {
InstructionAndShapeIndex(gte_0, {})};
EXPECT_EQ(succ, expected_succ);
std::vector<InstructionAndShapeIndex> pred =
GetPredecessors(InstructionAndShapeIndex(call, {0}));
std::vector<InstructionAndShapeIndex> expected_pred = {
InstructionAndShapeIndex(tuple, {0})};
EXPECT_EQ(pred, expected_pred);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/host_offload_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
824e9a92-d73e-40d9-bb47-186cbd4b9da5 | cpp | tensorflow/tensorflow | host_memory_transfer_asyncifier | third_party/xla/xla/service/host_memory_transfer_asyncifier.cc | third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {
public:
explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)
: kHostMemorySpaceColor(host_memory_space_color) {}
bool Changed() const { return changed_; }
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
return absl::OkStatus();
}
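  // Rewrites a dynamic-slice that reads from host memory into default
  // (device) memory as an async-start/async-done pair so the transfer can
  // overlap with other work.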
absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {
HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);
if (!dynamic_slice->shape().has_layout()) {
return InternalStrCat(dynamic_slice->name(), " does not have a layout.");
}
if (!dynamic_slice_operand->shape().has_layout()) {
return InternalStrCat(dynamic_slice->name(), "'s operand, ",
dynamic_slice_operand->name(),
", does not have a layout.");
}
VLOG(3) << absl::StreamFormat(
"\"%s\" from S(%d) to S(%d)", dynamic_slice->name(),
dynamic_slice_operand->shape().layout().memory_space(),
dynamic_slice->shape().layout().memory_space());
if (dynamic_slice_operand->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
return absl::OkStatus();
}
if (dynamic_slice->shape().layout().memory_space() !=
xla::Layout::kDefaultMemorySpace) {
return absl::OkStatus();
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);
TF_ASSIGN_OR_RETURN(
HloInstruction * async_done,
dynamic_slice->parent()->CreateAsyncInstructions(
dynamic_slice, {context_shape, transfer_bytes_shape}));
VLOG(1) << "DynamicSlice \"" << dynamic_slice->ToString()
<< "\" is slicing from host memory. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
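  // Rewrites a dynamic-update-slice whose update lives in device memory and
  // whose destination lives in host memory as an async-start/async-done pair.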
absl::Status HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) override {
HloInstruction* dynamic_update_slice_operand =
dynamic_update_slice->mutable_operand(0);
HloInstruction* dynamic_update_slice_update =
dynamic_update_slice->mutable_operand(1);
if (!dynamic_update_slice->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(),
" does not have a layout.");
}
if (!dynamic_update_slice_operand->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(), "'s operand, ",
dynamic_update_slice_operand->name(),
", does not have a layout.");
}
if (!dynamic_update_slice_update->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(), "'s update, ",
dynamic_update_slice_update->name(),
", does not have a layout.");
}
if (dynamic_update_slice_update->shape().layout().memory_space() !=
xla::Layout::kDefaultMemorySpace) {
return absl::OkStatus();
}
if (dynamic_update_slice->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
return absl::OkStatus();
}
if (dynamic_update_slice_operand->shape().layout().memory_space() !=
dynamic_update_slice->shape().layout().memory_space()) {
return InternalStrCat(
"Unexpected that ", dynamic_update_slice_operand->name(),
"'s memory space is not the same as the dynamic-update-slice.");
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSIGN_OR_RETURN(HloInstruction * async_done,
dynamic_update_slice->parent()->CreateAsyncInstructions(
dynamic_update_slice, {context_shape}));
VLOG(1) << "DynamicUpdateSlice \"" << dynamic_update_slice->ToString()
<< "\" is slicing into host memory space. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
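  // Rewrites a copy between device memory and host memory (in either
  // direction) as an async-start/async-done pair; copies within a single
  // memory space are left unchanged.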
absl::Status HandleCopy(HloInstruction* copy) override {
HloInstruction* operand = copy->mutable_operand(0);
if (!operand->shape().has_layout()) {
return InternalStrCat(operand->name(), " does not have a layout.");
}
if (!copy->shape().has_layout()) {
return InternalStrCat(copy->name(), " does not have a layout.");
}
const auto copy_src_memory_space = operand->shape().layout().memory_space();
const auto copy_dst_memory_space = copy->shape().layout().memory_space();
if (!((copy_src_memory_space == kHostMemorySpaceColor &&
copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||
(copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&
copy_dst_memory_space == kHostMemorySpaceColor))) {
VLOG(2)
<< "Skipping copy because it is not a copy between device memory and "
"host memory: "
<< copy->ToString();
return absl::OkStatus();
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSIGN_OR_RETURN(
HloInstruction * async_done,
copy->parent()->CreateAsyncInstructions(copy, {context_shape}));
VLOG(1)
<< "Copy \"" << copy->name()
<< "\" is between device and host memory space. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
private:
const int64_t kHostMemorySpaceColor;
bool changed_ = false;
void MarkAsChanged() { changed_ = true; }
};
}
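// Applies the visitor to every non-fusion computation; returns true if any
// transfer op was converted to its async form.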
absl::StatusOr<bool> HostMemoryTransferAsyncifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HostMemoryTransferAsyncifierVisitor visitor(kHostMemorySpaceColor);
for (HloComputation* computation : module->MakeNonfusionComputations()) {
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
}
return visitor.Changed();
}
} | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class HostMemoryTransferAsyncifierTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunAsyncifier(absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));
return changed;
}
absl::StatusOr<bool> RunAsyncifier(HloModule* module) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);
return asyncifier.Run(module);
}
private:
static constexpr int64_t kHostMemorySpaceColor{5};
};
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* dynamic_update_slice_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(0, m::Op(&dynamic_update_slice_start)
.WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);
HloComputation* async_dynamic_slice_computation =
dynamic_update_slice_start->called_computations().at(0);
EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* dynamic_slice_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(0, m::Op(&dynamic_slice_start)
.WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);
HloComputation* async_dynamic_slice_computation =
dynamic_slice_start->called_computations().at(0);
EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(copy_start->called_computations().size(), 1);
HloComputation* async_copy_computation =
copy_start->called_computations().at(0);
EXPECT_THAT(async_copy_computation->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kCopyDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kCopyStart))));
}
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(copy_start->called_computations().size(), 1);
HloComputation* async_copy_computation =
copy_start->called_computations().at(0);
EXPECT_THAT(async_copy_computation->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kCopyDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kCopyStart))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8d39f7fb-274f-498c-acfb-4154cac83495 | cpp | tensorflow/tensorflow | stream_pool | third_party/xla/xla/service/stream_pool.cc | third_party/xla/xla/service/stream_pool_test.cc | #include "xla/service/stream_pool.h"
#include <memory>
#include <utility>
#include "absl/strings/str_format.h"
namespace xla {
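// Returns a stream of the requested priority, reusing a pooled stream when an
// ok one is available and creating a new one otherwise. The returned Ptr's
// deleter hands the stream back to the pool.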
StreamPool::Ptr StreamPool::BorrowStream(se::StreamPriority priority) {
std::unique_ptr<se::Stream> stream;
{
absl::MutexLock lock(&mu_);
if (streams_with_pri_.find(priority) == streams_with_pri_.end()) {
stream = nullptr;
} else {
while (!streams_with_pri_[priority].empty() && !stream) {
stream = std::move(streams_with_pri_[priority].back());
streams_with_pri_[priority].pop_back();
if (stream->ok()) {
VLOG(1) << absl::StrFormat(
"StreamPool reusing existing stream (%p) with priority: %s",
stream.get(), se::StreamPriorityToString(priority));
} else {
          VLOG(1) << absl::StrFormat(
              "Stream (%p) was not ok; deleting it (priority: %s)",
              stream.get(), se::StreamPriorityToString(priority));
stream = nullptr;
}
}
}
}
if (!stream) {
stream = executor_->CreateStream(priority).value();
stream->set_name(absl::StrFormat("%s pool stream",
se::StreamPriorityToString(priority)));
VLOG(1) << absl::StrFormat("Created new stream (%p) with priority = %s",
stream.get(),
se::StreamPriorityToString(priority));
}
PtrDeleter deleter = {this};
return Ptr(stream.release(), deleter);
}
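// Called by PtrDeleter when a borrowed stream's Ptr is destroyed: ok streams
// go back into the pool for reuse, streams in an error state are destroyed.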
void StreamPool::ReturnStream(se::Stream* stream) {
if (stream->ok()) {
VLOG(1) << absl::StrFormat("StreamPool returning ok stream (%p)", stream);
absl::MutexLock lock(&mu_);
auto priority = std::get<se::StreamPriority>(stream->priority());
streams_with_pri_[priority].emplace_back(stream);
} else {
VLOG(1) << absl::StrFormat("StreamPool deleting !ok stream (%p)", stream);
delete stream;
}
}
} | #include "xla/service/stream_pool.h"
#include <memory>
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
namespace xla {
namespace {
class StreamPoolTest : public ::testing::Test {
protected:
se::StreamExecutor* NewStreamExecutor() {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
return platform->ExecutorForDevice(0).value();
}
};
TEST_F(StreamPoolTest, EmptyPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
}
TEST_F(StreamPoolTest, OneStreamPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
StreamPool::Ptr stream1 = pool.BorrowStream();
se::Stream* stream1_ptr = stream1.get();
EXPECT_TRUE(stream1->ok());
stream1 = nullptr;
StreamPool::Ptr stream2 = pool.BorrowStream();
se::Stream* stream2_ptr = stream2.get();
EXPECT_TRUE(stream2->ok());
stream2 = nullptr;
EXPECT_EQ(stream1_ptr, stream2_ptr);
}
TEST_F(StreamPoolTest, TwoStreamPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
StreamPool::Ptr stream1 = pool.BorrowStream();
se::Stream* stream1_ptr = stream1.get();
EXPECT_TRUE(stream1->ok());
StreamPool::Ptr stream2 = pool.BorrowStream();
se::Stream* stream2_ptr = stream2.get();
EXPECT_TRUE(stream2->ok());
EXPECT_NE(stream1_ptr, stream2_ptr);
stream1 = nullptr;
StreamPool::Ptr stream3 = pool.BorrowStream();
se::Stream* stream3_ptr = stream3.get();
EXPECT_TRUE(stream3->ok());
EXPECT_EQ(stream1_ptr, stream3_ptr);
EXPECT_NE(stream2_ptr, stream3_ptr);
stream2 = nullptr;
StreamPool::Ptr stream4 = pool.BorrowStream();
se::Stream* stream4_ptr = stream4.get();
EXPECT_TRUE(stream4->ok());
EXPECT_EQ(stream2_ptr, stream4_ptr);
EXPECT_NE(stream3_ptr, stream4_ptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3896e059-52e8-44fa-aa37-6d162956acdf | cpp | tensorflow/tensorflow | hlo_rematerialization | third_party/xla/xla/service/hlo_rematerialization.cc | third_party/xla/xla/service/hlo_rematerialization_test.cc | #include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
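// Returns true if the given instruction can be rematerialized. Copies whose
// layout matches their operand's, layout-constrained collectives, calls,
// constants, conditionals, custom calls, parameters, whiles, and
// side-effecting instructions are never rematerialized.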
bool IsRematerializable(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kCopy) {
if (LayoutUtil::Equal(instruction->shape().layout(),
instruction->operand(0)->shape().layout())) {
return false;
}
}
if (auto collective = DynCast<HloCollectiveInstruction>(instruction)) {
return !collective->constrain_layout();
}
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConstant:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kParameter:
case HloOpcode::kWhile:
return false;
default:
return !instruction->HasSideEffect();
}
}
bool CanBeRematerialized(
const HloInstruction* instruction,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
auto it = rematerializable_map->find(instruction);
if (it != rematerializable_map->end()) {
return it->second;
}
bool rematerializable = IsRematerializable(instruction);
(*rematerializable_map)[instruction] = rematerializable;
return rematerializable;
}
bool IsSupportedIndirectUser(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kGetTupleElement;
}
using BufferId = int64_t;
using BufferIdList = absl::InlinedVector<BufferId, 3>;
struct RematStrategy {
enum {
kRecompute,
kCompress,
kHostOffload,
} kind;
Shape compact_shape;
};
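// An Item wraps an HloInstruction as a node of the doubly-linked
// InstructionList below. It tracks whether the instruction has been placed
// in the new schedule, whether rematerializing it is forbidden
// (denylisted), and the ids of the buffers it defines, outputs, and uses.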
struct Item {
HloInstruction* instruction;
bool placed = false;
bool denylisted = false;
BufferIdList buffers_defined;
BufferIdList buffers_output;
BufferIdList buffers_used;
bool is_skip_node = false;
private:
friend class InstructionList;
Item* next = nullptr;
Item* prev = nullptr;
Item* prev_skip_node = nullptr;
Item* next_skip_node = nullptr;
int64_t position;
};
struct ItemUse {
Item* user;
int64_t operand_number;
std::optional<int64_t> index;
ItemUse(Item* user, int64_t op_num, std::optional<int64_t> index)
: user(user), operand_number(op_num), index(index) {}
bool operator==(const ItemUse& other) const {
return user == other.user && operand_number == other.operand_number &&
index == other.index;
}
};
using ItemList = absl::InlinedVector<Item*, 3>;
using UsesList = absl::InlinedVector<ItemUse, 3>;
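// Doubly-linked list of instructions in scheduling order. It supports O(1)
// insertion of rematerialized instructions relative to existing items and
// maintains a secondary chain of "skip nodes" (an express lane over
// promoted items) so that candidate scans can stride through the list
// quickly.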
class InstructionList {
public:
explicit InstructionList(const HloInstructionSequence& order) {
int64_t position = 0;
Item* last = nullptr;
last_skip_node_ = nullptr;
first_skip_node_ = nullptr;
for (HloInstruction* inst : order.instructions()) {
Item* item = new Item;
item->next = nullptr;
item->prev = last;
if (last == nullptr) {
first_ = item;
} else {
last->next = item;
}
last = item;
item->instruction = inst;
item->position = position;
position++;
item_map_[inst] = item;
}
}
~InstructionList() {
for (Item* item = first_; item != nullptr;) {
Item* next = item->next;
delete item;
item = next;
}
}
size_t size() const { return item_map_.size(); }
Item* first() const { return first_; }
Item* next(Item* item) const { return item->next; }
const Item* next(const Item* item) const { return item->next; }
Item* prev(Item* item) const { return item->prev; }
const Item* prev(const Item* item) const { return item->prev; }
Item* first_skip_node() const { return first_skip_node_; }
Item* next_skip_node(Item* item) const { return item->next_skip_node; }
Item* CreateItem(HloInstruction* inst) {
Item* item = new Item;
item->instruction = inst;
CHECK(item_map_.insert({inst, item}).second)
<< "inserting inst twice " << inst->name();
return item;
}
Item* GetItem(const HloInstruction* inst) const {
auto iter = item_map_.find(inst);
CHECK(iter != item_map_.end()) << "Did not find " << inst->name();
return iter->second;
}
void InsertBeforeInstructions(Item* to_insert,
absl::Span<Item* const> before_instructions) {
VLOG(3) << "InsertBeforeInstructions: " << to_insert->instruction->name()
<< " before {"
<< absl::StrJoin(before_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!before_instructions.empty());
Item* min_position_item = nullptr;
for (Item* item : before_instructions) {
if (min_position_item == nullptr ||
item->position < min_position_item->position) {
min_position_item = item;
}
}
while (min_position_item->prev != nullptr &&
min_position_item->position == min_position_item->prev->position) {
min_position_item = min_position_item->prev;
}
while (!absl::c_linear_search(before_instructions, min_position_item)) {
min_position_item = min_position_item->next;
}
return InsertBefore(to_insert, min_position_item);
}
void PromoteNodesToSkip(absl::FunctionRef<bool(Item*)> should_promote) {
int64_t count = 0;
for (auto* item = first(); item != nullptr; item = next(item)) {
if (should_promote(item)) {
count += 1;
if (first_skip_node_ == nullptr) {
first_skip_node_ = item;
}
item->is_skip_node = true;
item->prev_skip_node = last_skip_node_;
if (last_skip_node_ != nullptr) {
last_skip_node_->next_skip_node = item;
}
last_skip_node_ = item;
}
}
VLOG(1) << " Rematerialization has " << count << " items in express lane";
}
void InsertAfterInstructions(Item* to_insert,
absl::Span<Item* const> after_instructions) {
VLOG(3) << "InsertAfterInstructions: " << to_insert->instruction->name()
<< " after {"
<< absl::StrJoin(after_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!after_instructions.empty());
Item* max_position_item = nullptr;
for (Item* item : after_instructions) {
if (max_position_item == nullptr ||
item->position > max_position_item->position) {
max_position_item = item;
}
}
CHECK(max_position_item->next != nullptr);
InsertBeforeInstructions(to_insert, {max_position_item->next});
}
void Denylist(const HloInstruction* inst) {
GetItem(inst)->denylisted = true;
}
private:
void InsertBefore(Item* item, Item* before) {
VLOG(3) << "InsertBefore: " << item->instruction->name() << " before "
<< before->instruction->name();
item->is_skip_node = true;
Item* cursor = before;
while (cursor != nullptr && !cursor->is_skip_node) {
cursor = cursor->next;
}
CHECK(cursor == nullptr || cursor->is_skip_node);
if (cursor == nullptr) {
item->prev_skip_node = last_skip_node_;
item->next_skip_node = nullptr;
last_skip_node_ = item;
} else {
CHECK(cursor->is_skip_node);
item->prev_skip_node = cursor->prev_skip_node;
if (item->prev_skip_node != nullptr) {
item->prev_skip_node->next_skip_node = item;
}
item->next_skip_node = cursor;
cursor->prev_skip_node = item;
}
if (first_skip_node_ == cursor) {
first_skip_node_ = item;
}
item->prev = before->prev;
item->next = before;
before->prev = item;
if (item->prev != nullptr) {
item->prev->next = item;
} else {
first_ = item;
}
item->position = before->position;
}
Item* first_;
Item* first_skip_node_;
Item* last_skip_node_;
absl::flat_hash_map<const HloInstruction*, Item*> item_map_;
};
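// Returns the users of the given logical buffer, following buffer aliases.
// Sets *has_indirect_users if the buffer is reached through an aliasing
// instruction other than a supported passthrough (bitcast or
// get-tuple-element).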
UsesList GetUsers(const InstructionList& instruction_list,
const LogicalBuffer* logical_buffer,
const TuplePointsToAnalysis& points_to_analysis,
bool* has_indirect_users) {
UsesList users;
*has_indirect_users = false;
for (const BufferAlias& buffer_alias :
points_to_analysis.GetBufferAliases(*logical_buffer)) {
for (const HloInstruction* user : buffer_alias.instruction()->users()) {
if (points_to_analysis.DoesNotUseOperandBuffer(
buffer_alias.instruction(), buffer_alias.index(), user)) {
continue;
}
if (buffer_alias.instruction() != logical_buffer->instruction() &&
!IsSupportedIndirectUser(buffer_alias.instruction())) {
*has_indirect_users = true;
}
Item* user_item = instruction_list.GetItem(user);
std::optional<int64_t> user_index =
logical_buffer->index().size() != 1
? std::nullopt
: std::make_optional(logical_buffer->index().back());
for (int64_t op_idx : user->OperandIndices(buffer_alias.instruction())) {
if (!absl::c_linear_search(
users,
ItemUse{user_item, static_cast<int>(op_idx), user_index})) {
users.push_back(
ItemUse{user_item, static_cast<int>(op_idx), user_index});
}
}
}
}
return users;
}
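// Tracks the memory usage of a computation as its instructions are placed
// one by one in scheduling order. It estimates how much memory a recompute,
// compress, or host-offload rematerialization would save, picks the
// cheapest candidate block, and updates its bookkeeping once the new
// instructions are actually inserted.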
class MemoryUsageTracker {
public:
MemoryUsageTracker(const HloRematerialization::Options& options,
const HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const InstructionList& instruction_list);
absl::Status BeginInstruction(Item* item);
int64_t RematerializationCost(const std::vector<Item*>& items,
int64_t memory_reduced,
int64_t memory_limit_bytes) const {
bool zero_cost_move = true;
for (auto* item : items) {
auto* instruction = item->instruction;
if (absl::c_any_of(
instruction->users(),
[this](const HloInstruction* inst) { return IsPlaced(inst); })) {
zero_cost_move = false;
break;
}
}
if (zero_cost_move) {
return 0;
}
CHECK_GT(memory_reduced, 0);
return memory_limit_bytes / memory_reduced;
}
absl::Status EndInstruction();
int64_t MemoryReducedIfCompressed(const Item* item,
const Shape& compact_shape) const;
int64_t MemoryReducedIfRematerialized(
absl::Span<const Item* const> items) const;
absl::Status AddCompressInstructions(Item* original_item,
Item* compressed_item,
Item* uncompressed_item);
absl::Status AddRematerializedInstruction(Item* original_item,
Item* remat_item,
absl::Span<Item*> indirect_users);
std::tuple<UsesList, UsesList> GetPlacedAndUnplacedUsers(
const UsesList& uses) const;
public:
absl::Status AddHostOffloadCopyInstructions(Item* original_item,
Item* copy_start_to_host_item,
Item* copy_done_to_host_item,
Item* copy_start_to_device_item,
Item* copy_done_to_device_item);
int64_t BytesUsedByBuffers(const Item* item,
bool only_count_unplaced_users) const;
std::optional<int64_t> GetCostOfCompression(const Item* candidate_item,
int64_t memory_limit_bytes,
int64_t peak_memory_bytes);
std::optional<int64_t> GetCostOfHostOffload(const Item* candidate_item,
int64_t memory_limit_bytes) const;
std::optional<int64_t> GetCostOfRecompute(
const std::vector<Item*>& candidate_items,
int64_t memory_limit_bytes) const;
std::tuple<std::vector<Item*>, RematStrategy, int>
PickRematerializationCandidates(
const InstructionList& instruction_list, int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
int min_block_size, int max_block_size, int64_t peak_memory_bytes);
bool IsPlaced(const HloInstruction* instruction) const {
return instruction_list_.GetItem(instruction)->placed;
}
bool HasUnplacedUsers(Item* item) const;
UsesList GetItemUses(Item* item) const;
bool IsInProgressItem(Item* item) const { return item == in_progress_item_; }
int64_t memory_usage() const { return memory_usage_; }
int64_t AllocatedSize(Item* item) const {
int64_t size = 0;
for (auto buffer_id : item->buffers_defined) {
size += AllocatedSize(buffer_id);
}
return size;
}
const HloComputation* computation() const { return computation_; }
const HloRematerialization::Options& options() const { return options_; }
bool Check() const;
std::string ToString() const;
private:
struct Buffer {
const BufferId id;
Item* defining_instruction;
const int64_t size;
Shape shape;
bool live_out;
bool has_indirect_uses;
ShapeIndex index;
UsesList users;
int64_t unfinished_user_count;
std::string ToString() const {
return absl::StrCat("Buffer ", id, " (defined by ",
defining_instruction->instruction->name(), ", size ",
size, " bytes)");
}
};
void CountAllocatedMemory(Item* item);
absl::Status CountFreedMemory(Item* item);
void ReplaceUsesInUsersOfBuffer(Buffer& buffer, BufferId old_id) const;
absl::StatusOr<const Shape*> GetCompactShape(const HloInstruction* hlo);
Buffer& CreateBufferFromLogicalBuffer(
const LogicalBuffer* logical_buffer,
const TuplePointsToAnalysis& points_to_analysis, bool live_out) {
bool has_indirect_uses = false;
UsesList users = GetUsers(instruction_list_, logical_buffer,
points_to_analysis, &has_indirect_uses);
return NewBuffer(instruction_list_.GetItem(logical_buffer->instruction()),
logical_buffer->shape(), logical_buffer->index(),
std::move(users), live_out, has_indirect_uses);
}
Buffer& RematerializeBuffer(const Buffer& original_buffer, Item* remat_item,
UsesList&& rematerialized_uses) {
CHECK(original_buffer.defining_instruction->placed)
<< original_buffer.defining_instruction->instruction->name();
CHECK(!original_buffer.has_indirect_uses) << original_buffer.ToString();
CHECK(!original_buffer.live_out) << original_buffer.ToString();
for (ItemUse& use : rematerialized_uses) {
CHECK(!use.user->placed) << use.user->instruction->name();
}
    return NewBuffer(remat_item, original_buffer.shape, original_buffer.index,
                     std::move(rematerialized_uses), /*live_out=*/false,
                     /*has_indirect_uses=*/false);
}
int64_t AllocatedSize(BufferId buffer_id) const {
const Buffer& buffer = buffers_.at(buffer_id);
HloInstruction* inst = buffer.defining_instruction->instruction;
HloOpcode def_opcode = inst->opcode();
if (buffer.live_out || def_opcode == HloOpcode::kParameter) {
return 0;
} else {
if (options_.host_memory_offload_config && buffer.shape.has_layout() &&
buffer.shape.layout().memory_space() ==
options_.host_memory_offload_config->host_memory_space) {
return 0;
}
return buffer.size;
}
}
bool IsFinished(Item* item) const {
return item->placed && item != in_progress_item_;
}
bool IsInUse(BufferId buffer_id) const {
if (in_progress_item_ == nullptr) {
return false;
}
const BufferIdList& in_progress_uses = in_progress_item_->buffers_used;
return absl::c_linear_search(in_progress_uses, buffer_id);
}
bool IsCurrentlyLive(BufferId buffer_id) const {
const Buffer& buffer = buffers_[buffer_id];
return (buffer.defining_instruction->placed &&
buffer.unfinished_user_count > 0);
}
bool IsInstructionCurrentlyLive(const Item* instruction) const {
if (!IsPlaced(instruction->instruction)) {
return false;
}
for (const HloInstruction* user : instruction->instruction->users()) {
if (!IsPlaced(user)) {
return true;
}
}
return false;
}
Buffer& NewBuffer(Item* defining_instruction, const Shape& shape,
const ShapeIndex& index, UsesList&& uses, bool live_out,
bool has_indirect_uses) {
int buffer_id = buffers_.size();
auto get_num_of_unique_users = [](const UsesList& uses) -> int64_t {
absl::flat_hash_set<Item*> users_set;
for (const ItemUse& use : uses) {
users_set.insert(use.user);
}
return users_set.size();
};
buffers_.push_back(Buffer{buffer_id, defining_instruction,
options_.hlo_cost_analysis.GetShapeSize(shape),
shape, live_out, has_indirect_uses, index, uses,
get_num_of_unique_users(uses)});
return buffers_.back();
}
const HloRematerialization::Options& options_;
const HloComputation* computation_;
const InstructionList& instruction_list_;
absl::flat_hash_map<const HloInstruction*, Shape> compact_shape_;
int64_t memory_usage_ = 0;
Item* in_progress_item_ = nullptr;
std::vector<Buffer> buffers_;
};
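// Builds the Buffer records from the points-to analysis. Buffers defined by
// a while instruction are unified with the corresponding buffers of its
// operand, since a while aliases its input and output.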
MemoryUsageTracker::MemoryUsageTracker(
const HloRematerialization::Options& options,
const HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const InstructionList& instruction_list)
: options_(options),
computation_(computation),
instruction_list_(instruction_list) {
PointsToSet::BufferSet live_out_set =
points_to_analysis.GetPointsToSet(computation_->root_instruction())
.CreateFlattenedSet();
absl::flat_hash_map<const LogicalBuffer*, BufferId>
logical_buffer_to_buffer_id;
for (auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
const HloInstruction* const instruction = item->instruction;
for (const LogicalBuffer* logical_buffer :
points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
Buffer* buffer;
if (instruction->opcode() == HloOpcode::kWhile) {
const PointsToSet& operand_points_to =
points_to_analysis.GetPointsToSet(instruction->operand(0));
CHECK_EQ(operand_points_to.element(logical_buffer->index()).size(), 1);
const LogicalBuffer* source_logical_buffer =
operand_points_to.element(logical_buffer->index())[0];
buffer =
&buffers_.at(logical_buffer_to_buffer_id.at(source_logical_buffer));
buffer->has_indirect_uses = true;
buffer->live_out =
buffer->live_out || ContainsKey(live_out_set, logical_buffer);
bool unused;
for (ItemUse& user_item : GetUsers(instruction_list_, logical_buffer,
points_to_analysis, &unused)) {
auto existing_user_it = absl::c_find_if(
buffer->users,
[&](const ItemUse& use) { return user_item.user == use.user; });
if (existing_user_it == buffer->users.end()) {
buffer->unfinished_user_count++;
user_item.user->buffers_used.push_back(buffer->id);
buffer->users.push_back(user_item);
}
}
} else {
buffer = &CreateBufferFromLogicalBuffer(
logical_buffer, points_to_analysis,
ContainsKey(live_out_set, logical_buffer));
item->buffers_defined.push_back(buffer->id);
for (ItemUse& user : buffer->users) {
if (!absl::c_linear_search(user.user->buffers_used, buffer->id)) {
user.user->buffers_used.push_back(buffer->id);
}
}
}
logical_buffer_to_buffer_id[logical_buffer] = buffer->id;
}
for (const LogicalBuffer* logical_buffer :
points_to_analysis.GetPointsToSet(instruction).CreateFlattenedSet()) {
item->buffers_output.push_back(
logical_buffer_to_buffer_id[logical_buffer]);
}
}
XLA_VLOG_LINES(10, ToString());
DCHECK(Check());
}
void MemoryUsageTracker::CountAllocatedMemory(Item* item) {
for (BufferId buffer_id : item->buffers_defined) {
VLOG(3) << " Buffer " << buffers_.at(buffer_id).ToString()
<< " is now live.";
memory_usage_ += AllocatedSize(buffer_id);
}
}
absl::Status MemoryUsageTracker::CountFreedMemory(Item* item) {
for (BufferId buffer_id : item->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
buffer.unfinished_user_count--;
TF_RET_CHECK(buffer.unfinished_user_count >= 0)
<< buffer.ToString() << " has negative unfinished user count.";
if (buffer.unfinished_user_count == 0) {
VLOG(3) << " " << buffer.ToString() << " is now dead.";
memory_usage_ -= AllocatedSize(buffer_id);
}
}
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
if (buffer.unfinished_user_count == 0) {
VLOG(3) << " " << buffer.ToString() << " is immediately dead.";
memory_usage_ -= AllocatedSize(buffer_id);
}
}
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::BeginInstruction(Item* item) {
const HloInstruction* instruction = item->instruction;
VLOG(3) << "BeginInstruction " << instruction->name();
TF_RET_CHECK(in_progress_item_ == nullptr);
in_progress_item_ = item;
item->placed = true;
CountAllocatedMemory(item);
VLOG(3) << " memory usage = " << memory_usage_;
VLOG(10) << ToString();
if (VLOG_IS_ON(1)) {
DCHECK(Check());
}
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::EndInstruction() {
TF_RET_CHECK(in_progress_item_ != nullptr);
VLOG(3) << "EndInstruction " << in_progress_item_->instruction->name();
TF_RETURN_IF_ERROR(CountFreedMemory(in_progress_item_));
in_progress_item_ = nullptr;
VLOG(3) << " memory usage = " << memory_usage_;
VLOG(10) << ToString();
if (VLOG_IS_ON(1)) {
DCHECK(Check());
}
return absl::OkStatus();
}
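// Returns the bytes saved by converting the item's single output buffer to
// its compact shape: the original size minus the compact size, or zero when
// the item is unplaced, in progress, or its buffer is dead or currently in
// use.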
int64_t MemoryUsageTracker::MemoryReducedIfCompressed(
const Item* item, const Shape& compact_shape) const {
CHECK_NE(in_progress_item_, nullptr);
if (!item->placed || item == in_progress_item_) {
return 0;
}
int64_t memory_reduced = 0;
CHECK_EQ(item->buffers_output.size(), 1);
BufferId buffer_id = item->buffers_output[0];
if (IsCurrentlyLive(buffer_id) && !IsInUse(buffer_id) &&
IsInstructionCurrentlyLive(item)) {
const Buffer& buffer = buffers_.at(buffer_id);
memory_reduced += buffer.size;
int64_t compact_shape_size =
options_.hlo_cost_analysis.GetShapeSize(compact_shape);
memory_reduced -= compact_shape_size;
}
return memory_reduced;
}
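// Returns the net bytes freed by recomputing the given block of items later
// in the schedule: the live buffers they define would be freed, while
// operand buffers that are not currently live would have to be revived and
// are charged against the savings. Returns zero for blocks that cannot be
// handled (live-out buffers, indirect uses, or buffers used by the
// in-progress instruction).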
int64_t MemoryUsageTracker::MemoryReducedIfRematerialized(
absl::Span<const Item* const> items) const {
CHECK_NE(in_progress_item_, nullptr);
int64_t memory_reduced = 0;
absl::flat_hash_set<const Item*> remat_candidates;
for (const Item* item : items) {
if (!item->placed || item == in_progress_item_) {
LOG(WARNING) << "Unplaced item or in progress item being checked for "
"rematerialization.";
return 0;
}
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
if (buffer.has_indirect_uses || buffer.live_out ||
buffer.index.size() > 1) {
return 0;
}
if (IsInUse(buffer_id)) {
return 0;
}
if (IsCurrentlyLive(buffer_id)) {
memory_reduced += AllocatedSize(buffer_id);
}
}
for (BufferId buffer_id : item->buffers_used) {
if (!IsCurrentlyLive(buffer_id)) {
Item* defining_instruction =
buffers_.at(buffer_id).defining_instruction;
if (!remat_candidates.contains(defining_instruction)) {
memory_reduced -= AllocatedSize(buffer_id);
}
}
}
remat_candidates.insert(item);
}
return memory_reduced;
}
std::tuple<UsesList, UsesList> MemoryUsageTracker::GetPlacedAndUnplacedUsers(
const UsesList& uses) const {
UsesList placed_users, unplaced_users;
for (const ItemUse& use : uses) {
if (use.user->placed) {
DCHECK(IsFinished(use.user)) << use.user->instruction->name();
placed_users.push_back(use);
} else {
unplaced_users.push_back(use);
}
}
return {placed_users, unplaced_users};
}
void MemoryUsageTracker::ReplaceUsesInUsersOfBuffer(Buffer& buffer,
BufferId old_id) const {
for (ItemUse& use : buffer.users) {
BufferIdList& buffers_used = use.user->buffers_used;
absl::c_replace(buffers_used, old_id, buffer.id);
}
}
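// Updates the tracker after compression instructions have been created: the
// original buffer keeps only its placed users plus the compressing copy, a
// new compact buffer feeds the uncompressing copy, and the uncompressed
// output buffer takes over the original buffer's unplaced users.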
absl::Status MemoryUsageTracker::AddCompressInstructions(
Item* original_item, Item* compressed_item, Item* uncompressed_item) {
CHECK(original_item->placed)
<< "Compressing instruction, but the original is not yet placed.";
CHECK_EQ(original_item->buffers_output.size(), 1)
<< "Only compressing items which have a single output buffer";
memory_usage_ -= options_.hlo_cost_analysis.GetShapeSize(
original_item->instruction->shape());
memory_usage_ += options_.hlo_cost_analysis.GetShapeSize(
compressed_item->instruction->shape());
BufferId original_buffer_id = original_item->buffers_output[0];
Buffer& original_buffer = buffers_.at(original_buffer_id);
auto [placed_users, unplaced_users] =
GetPlacedAndUnplacedUsers(original_buffer.users);
original_buffer.users = std::move(placed_users);
original_buffer.unfinished_user_count = 0;
original_buffer.users.push_back(ItemUse{compressed_item, 0, std::nullopt});
ShapeIndex copied_index = original_buffer.index;
Buffer& compressed_buffer =
NewBuffer(compressed_item, compressed_item->instruction->shape(),
copied_index, {ItemUse{uncompressed_item, 0, std::nullopt}},
                /*live_out=*/false,
                /*has_indirect_uses=*/false);
compressed_item->buffers_used = original_item->buffers_output;
compressed_item->buffers_output = {compressed_buffer.id};
compressed_item->buffers_defined.push_back(compressed_buffer.id);
Buffer& uncompressed_buffer =
NewBuffer(uncompressed_item, uncompressed_item->instruction->shape(),
                copied_index, std::move(unplaced_users), /*live_out=*/false,
                /*has_indirect_uses=*/false);
uncompressed_item->buffers_used = {compressed_item->buffers_output[0]};
uncompressed_item->buffers_output = {uncompressed_buffer.id};
uncompressed_item->buffers_defined = {uncompressed_buffer.id};
ReplaceUsesInUsersOfBuffer(uncompressed_buffer, original_buffer_id);
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::AddRematerializedInstruction(
Item* original_item, Item* remat_item, absl::Span<Item*> indirect_users) {
VLOG(3) << "AddRematerializedInstruction: original_instruction = "
<< original_item->instruction->name()
<< ", remat_instruction = " << remat_item->instruction->name();
TF_RET_CHECK(in_progress_item_ != nullptr);
TF_RET_CHECK(original_item->placed) << original_item->instruction->name();
TF_RET_CHECK(!remat_item->placed) << remat_item->instruction->name();
remat_item->buffers_used = original_item->buffers_used;
for (BufferId buffer_id : original_item->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
if (buffer.unfinished_user_count == 0) {
memory_usage_ += AllocatedSize(buffer.id);
}
buffer.unfinished_user_count++;
absl::InlinedVector<ItemUse, 2> filtered_users;
std::copy_if(buffer.users.begin(), buffer.users.end(),
std::back_inserter(filtered_users),
[&](const ItemUse& iu) { return iu.user == original_item; });
for (ItemUse& u : filtered_users) {
buffer.users.push_back(ItemUse{remat_item, u.operand_number, u.index});
}
}
const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
indirect_users.end());
for (BufferId old_buffer_id : original_item->buffers_defined) {
Buffer& old_buffer = buffers_.at(old_buffer_id);
UsesList placed_users;
UsesList unplaced_users;
for (ItemUse& user : old_buffer.users) {
if (user.user->placed) {
placed_users.push_back(user);
} else {
if (!IsSupportedIndirectUser(user.user->instruction) ||
indirect_users_set.contains(user.user)) {
unplaced_users.push_back(user);
} else {
          CHECK(user.user->buffers_defined.empty())
              << "Buffers defined are expected to be empty for passthrough "
                 "instructions";
user.user->buffers_output.clear();
user.user->buffers_used.clear();
}
}
}
old_buffer.users = std::move(placed_users);
old_buffer.unfinished_user_count = 0;
memory_usage_ -= AllocatedSize(old_buffer.id);
Buffer& new_buffer =
RematerializeBuffer(old_buffer, remat_item, std::move(unplaced_users));
remat_item->buffers_defined.push_back(new_buffer.id);
remat_item->buffers_output.push_back(new_buffer.id);
auto update_buffers = [old_buffer_id, new_buffer_id = new_buffer.id](
BufferIdList& to_update) {
std::replace(to_update.begin(), to_update.end(), old_buffer_id,
new_buffer_id);
};
for (ItemUse& user : new_buffer.users) {
update_buffers(user.user->buffers_used);
update_buffers(user.user->buffers_output);
}
}
for (Item* indirect_user : indirect_users) {
const Item* source_item =
instruction_list_.GetItem(indirect_user->instruction->operand(0));
switch (indirect_user->instruction->opcode()) {
case HloOpcode::kBitcast: {
if (IsSupportedIndirectUser(source_item->instruction)) {
indirect_user->buffers_used = source_item->buffers_output;
indirect_user->buffers_output = source_item->buffers_output;
} else {
indirect_user->buffers_used = source_item->buffers_defined;
indirect_user->buffers_output = source_item->buffers_defined;
}
break;
}
case HloOpcode::kGetTupleElement: {
const HloGetTupleElementInstruction* gte =
Cast<HloGetTupleElementInstruction>(indirect_user->instruction);
for (BufferId buffer_id : source_item->buffers_defined) {
const Buffer& def_buffer = buffers_.at(buffer_id);
if (def_buffer.index == ShapeIndex{gte->tuple_index()}) {
indirect_user->buffers_output.push_back(buffer_id);
}
if (def_buffer.index.empty()) {
indirect_user->buffers_used.push_back(buffer_id);
}
}
break;
}
default: {
LOG(FATAL) << "Unsupported indirect instruction with opcode "
<< indirect_user->instruction->opcode();
break;
}
}
for (BufferId buffer_id : indirect_user->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
buffer.unfinished_user_count++;
buffer.users.push_back(ItemUse{indirect_user, 0, std::nullopt});
}
}
VLOG(3) << " memory usage = " << memory_usage_;
XLA_VLOG_LINES(10, ToString());
DCHECK(Check());
return absl::OkStatus();
}
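// Updates the tracker for a host offload: the original value now flows
// through a copy-start/copy-done pair to host memory and another pair back
// to device. New buffers are created for the copies (including the
// copy-starts' context buffers), and the final copy-done's output buffer
// takes over the original buffer's unplaced users.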
absl::Status MemoryUsageTracker::AddHostOffloadCopyInstructions(
Item* original_item, Item* copy_start_to_host_item,
Item* copy_done_to_host_item, Item* copy_start_to_device_item,
Item* copy_done_to_device_item) {
CHECK_EQ(original_item->buffers_defined.size(), 1);
CHECK_EQ(original_item->buffers_output.size(), 1);
BufferId original_buffer_id = original_item->buffers_output[0];
Buffer& original_buffer = buffers_.at(original_buffer_id);
auto [placed_users, unplaced_users] =
GetPlacedAndUnplacedUsers(original_buffer.users);
original_buffer.users = std::move(placed_users);
original_buffer.users.emplace_back(copy_start_to_host_item, 0, std::nullopt);
original_buffer.unfinished_user_count = 1;
CHECK_EQ(copy_start_to_host_item->instruction->shape().tuple_shapes_size(), 3)
<< "copy_start_to_host_item's shape is "
<< copy_start_to_host_item->instruction->shape().ToString();
CHECK_EQ(copy_start_to_device_item->instruction->shape().tuple_shapes_size(),
3)
<< "copy_start_to_device_item's shape is "
<< copy_start_to_device_item->instruction->shape().ToString();
BufferId copy_start_to_host_device_buffer_id =
NewBuffer(copy_start_to_host_item,
copy_start_to_host_item->instruction->shape().tuple_shapes(1),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
.id;
BufferId copy_start_to_host_context_buffer_id =
NewBuffer(copy_start_to_host_item,
copy_start_to_host_item->instruction->shape().tuple_shapes(2),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
.id;
BufferId copy_start_to_device_device_buffer_id =
NewBuffer(copy_start_to_device_item,
copy_start_to_device_item->instruction->shape().tuple_shapes(0),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
.id;
BufferId copy_start_to_device_context_buffer_id =
NewBuffer(copy_start_to_device_item,
copy_start_to_device_item->instruction->shape().tuple_shapes(2),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
.id;
BufferId copy_done_to_device_buffer_id =
NewBuffer(copy_done_to_device_item,
copy_done_to_device_item->instruction->shape(), ShapeIndex(),
                std::move(unplaced_users), /*live_out=*/false,
                /*has_indirect_uses=*/false)
.id;
copy_start_to_host_item->buffers_used = original_item->buffers_output;
copy_start_to_host_item->buffers_output = {
copy_start_to_host_device_buffer_id,
copy_start_to_host_context_buffer_id};
copy_start_to_host_item->buffers_defined = {
copy_start_to_host_device_buffer_id,
copy_start_to_host_context_buffer_id};
copy_done_to_host_item->buffers_used =
copy_start_to_host_item->buffers_output;
copy_done_to_host_item->buffers_output = {};
copy_done_to_host_item->buffers_defined = {};
copy_start_to_device_item->buffers_used =
copy_done_to_host_item->buffers_output;
copy_start_to_device_item->buffers_output = {
copy_start_to_device_device_buffer_id,
copy_start_to_device_context_buffer_id};
copy_start_to_device_item->buffers_defined = {
copy_start_to_device_device_buffer_id,
copy_start_to_device_context_buffer_id};
copy_done_to_device_item->buffers_used =
copy_start_to_device_item->buffers_output;
copy_done_to_device_item->buffers_output = {copy_done_to_device_buffer_id};
copy_done_to_device_item->buffers_defined = {copy_done_to_device_buffer_id};
Buffer& copy_done_to_device_buffer =
buffers_.at(copy_done_to_device_buffer_id);
ReplaceUsesInUsersOfBuffer(copy_done_to_device_buffer, original_buffer_id);
if (copy_start_to_host_item->placed) {
CountAllocatedMemory(copy_start_to_host_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_host_item));
if (copy_done_to_host_item->placed) {
CountAllocatedMemory(copy_done_to_host_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_host_item));
if (copy_start_to_device_item->placed) {
CountAllocatedMemory(copy_start_to_device_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_device_item));
if (copy_done_to_device_item->placed) {
CountAllocatedMemory(copy_done_to_device_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_device_item));
}
}
}
}
return absl::OkStatus();
}
std::string MemoryUsageTracker::ToString() const {
std::string output =
absl::StrCat("MemoryUsageTracker for ", computation_->name(), "\n");
absl::StrAppend(&output,
"Memory usage: ", HumanReadableNumBytes(memory_usage()), " (",
memory_usage(), " bytes)");
for (auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
const HloInstruction* instruction = item->instruction;
absl::string_view inprogress =
item == in_progress_item_ ? " in-progress" : "";
absl::string_view placed = item->placed ? " placed" : "";
absl::StrAppend(&output, " ", instruction->name(), inprogress, placed,
"\n Defines:\n");
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_[buffer_id];
absl::string_view live = IsCurrentlyLive(buffer_id) ? " live" : "";
absl::StrAppend(&output, " ", buffer.ToString(), live, ", ",
buffer.unfinished_user_count, " unfinished uses\n");
}
absl::StrAppend(&output, " Outputs:\n");
for (BufferId buffer_id : item->buffers_output) {
absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n");
}
absl::StrAppend(&output, " Uses:\n");
for (BufferId buffer_id : item->buffers_used) {
absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n");
}
}
return output;
}
absl::StatusOr<const Shape*> MemoryUsageTracker::GetCompactShape(
const HloInstruction* hlo) {
auto it = compact_shape_.find(hlo);
if (it != compact_shape_.end()) {
return &it->second;
}
const Shape& original_shape = hlo->shape();
TF_ASSIGN_OR_RETURN(Shape min_shape,
options_.compact_shape_function(original_shape));
return &compact_shape_.emplace(hlo, min_shape).first->second;
}
bool MemoryUsageTracker::Check() const {
auto elements_are_unique = [](const BufferIdList& vec) {
return vec.size() == std::set<BufferId>(vec.begin(), vec.end()).size();
};
for (auto* instruction : computation_->instructions()) {
const BufferIdList& defined_buffers =
instruction_list_.GetItem(instruction)->buffers_defined;
CHECK(elements_are_unique(defined_buffers))
<< "Instruction " << instruction->name()
<< " does not have unique defined buffers: "
<< absl::StrJoin(defined_buffers, ", ",
[this](std::string* out, BufferId buffer_id) {
absl::StrAppend(out,
buffers_.at(buffer_id).ToString());
});
for (const Buffer& buffer : buffers_) {
if (buffer.defining_instruction->instruction == instruction) {
CHECK(absl::c_linear_search(defined_buffers, buffer.id))
<< "Instruction " << instruction->name()
<< " defined buffers is missing: " << buffer.ToString();
}
}
}
for (auto* instruction : computation_->instructions()) {
const BufferIdList& used_buffers =
instruction_list_.GetItem(instruction)->buffers_used;
CHECK(elements_are_unique(used_buffers))
<< "Instruction " << instruction->name()
<< " does not have unique used buffers: "
<< absl::StrJoin(used_buffers, ", ",
[this](std::string* out, BufferId buffer_id) {
absl::StrAppend(out,
buffers_.at(buffer_id).ToString());
});
}
for (const Buffer& buffer : buffers_) {
int64_t unfinished_uses = 0;
absl::flat_hash_set<Item*> already_counted_user;
for (const ItemUse& user : buffer.users) {
const BufferIdList& used_buffers = user.user->buffers_used;
CHECK(absl::c_linear_search(used_buffers, buffer.id))
<< "Instruction " << user.user->instruction->name()
<< " used buffers is missing " << buffer.ToString();
if (!IsFinished(user.user) &&
already_counted_user.insert(user.user).second) {
unfinished_uses++;
}
}
CHECK_EQ(buffer.unfinished_user_count, unfinished_uses)
<< "Incorrect unplaced use count for " << buffer.ToString();
}
return true;
}
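// Collects a block of up to min_block_size consecutive placed items
// starting at start_item. Stops early at an unplaced or in-progress item,
// in which case the returned block is shorter than requested.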
std::vector<Item*> GetInitialBlock(const InstructionList& instruction_list,
const MemoryUsageTracker& tracker,
Item* start_item, int min_block_size) {
std::vector<Item*> item_block;
Item* curr_item = start_item;
for (int i = 0; i < min_block_size; ++i) {
if (curr_item == nullptr || !curr_item->placed ||
tracker.IsInProgressItem(curr_item)) {
break;
}
item_block.push_back(curr_item);
curr_item = instruction_list.next(curr_item);
}
return item_block;
}
bool AnyDenylistedOrNonRematerializable(
const std::vector<Item*>& block,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
for (auto* item : block) {
if (item->denylisted) {
return true;
}
if (!CanBeRematerialized(item->instruction, rematerializable_map)) {
return true;
}
}
return false;
}
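// Returns the number of allocated bytes defined by this item's buffers.
// When only_count_unplaced_users is set, a buffer is counted only if at
// least one of its users has not been placed yet.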
int64_t MemoryUsageTracker::BytesUsedByBuffers(
const Item* item, bool only_count_unplaced_users) const {
int64_t bytes_used_by_buffers = 0;
for (const auto& buffer_id : item->buffers_defined) {
VLOG(3) << " buffer " << buffer_id << "'s users are "
<< absl::StrJoin(buffers_.at(buffer_id).users, ", ",
[](std::string* str, const auto& use) {
str->append(use.user->instruction->name());
});
for (const auto& use : buffers_.at(buffer_id).users) {
if (!only_count_unplaced_users || !use.user->placed) {
bytes_used_by_buffers += AllocatedSize(buffer_id);
break;
}
}
}
return bytes_used_by_buffers;
}
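// Returns the cost of compressing the candidate's output into its compact
// shape (memory_limit_bytes / bytes_saved), or nullopt when compression is
// not viable: the item must be placed, have a single non-live-out array
// output, actually save memory, and the original plus compact shapes must
// fit under the peak memory.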
std::optional<int64_t> MemoryUsageTracker::GetCostOfCompression(
const Item* candidate_item, int64_t memory_limit_bytes,
int64_t peak_memory_bytes) {
CHECK(candidate_item != nullptr);
if (candidate_item->buffers_output.size() != 1) {
HloInstruction* candidate_instruction = candidate_item->instruction;
    VLOG(2) << " " << candidate_instruction->name()
            << " has more than one output buffer; cannot compress.";
return {};
}
const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
if (!candidate_item->placed || candidate_item == in_progress_item_ ||
output_buffer.live_out) {
return {};
}
const Shape& original_shape = candidate_item->instruction->shape();
if (!original_shape.IsArray()) {
return {};
}
const Shape* compact_shape =
GetCompactShape(candidate_item->instruction).value();
const int64_t memory_reduced =
MemoryReducedIfCompressed(candidate_item, *compact_shape);
const int64_t size = options_.hlo_cost_analysis.GetShapeSize(
candidate_item->instruction->shape());
const int64_t reduced_size =
options_.hlo_cost_analysis.GetShapeSize(*compact_shape);
if (memory_reduced > 0 && size + reduced_size < peak_memory_bytes) {
return memory_limit_bytes / memory_reduced;
} else {
return {};
}
}
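// Returns the cost of offloading the candidate's output to host memory
// (memory_limit_bytes / bytes_freed), or nullopt when offloading is not
// viable: bitcast or tuple users, a live-out or unplaced output, use by the
// in-progress instruction, nothing to free, or too little compute between
// the last placed use and the first unplaced use to hide the round-trip
// copy time.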
std::optional<int64_t> MemoryUsageTracker::GetCostOfHostOffload(
const Item* candidate_item, int64_t memory_limit_bytes) const {
CHECK(candidate_item != nullptr);
HloInstruction* candidate_instruction = candidate_item->instruction;
VLOG(2)
<< "Considering host offload as an option for remat. looking at instr "
<< candidate_instruction->name();
if (candidate_item->buffers_output.size() != 1) {
VLOG(2) << " " << candidate_instruction->name()
<< " has more than one output buffer; cannot offload to host.";
return {};
}
for (auto buffer_id : candidate_item->buffers_defined) {
for (auto use : buffers_.at(buffer_id).users) {
if (use.user->instruction->opcode() == HloOpcode::kBitcast) {
VLOG(3) << " " << candidate_item->instruction->name()
<< " has a user which is a bitcast instruction("
<< use.user->instruction->name()
<< "); cannot offload "
"to host.";
return {};
} else if (use.user->instruction->opcode() == HloOpcode::kTuple) {
VLOG(3) << " " << candidate_item->instruction->name()
<< " has a user which is a tuple instruction("
<< use.user->instruction->name()
<< "); cannot offload "
"to host.";
return {};
}
}
}
const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
if (!candidate_item->placed || candidate_item == in_progress_item_ ||
output_buffer.live_out) {
VLOG(2) << " " << candidate_instruction->name()
<< " is not yet placed, is in progress, or is \"live_out\"; cannot "
"offload to host.";
return {};
}
const bool current_instruction_uses_this_item = [&]() {
if (in_progress_item_ == nullptr) {
return false;
}
const auto& output_buffer_ids = candidate_item->buffers_output;
for (const auto& output_buffer_id : output_buffer_ids) {
const Buffer& output_buffer = buffers_.at(output_buffer_id);
for (const auto& use : output_buffer.users) {
if (use.user == in_progress_item_) {
return true;
}
}
}
return false;
}();
if (current_instruction_uses_this_item) {
VLOG(2) << " " << candidate_instruction->name()
<< " is used by the current instruction in mem tracker ("
<< in_progress_item_->instruction->name()
<< "); cannot offload to host.";
return {};
}
  const int64_t bytes_used_by_buffers =
      BytesUsedByBuffers(candidate_item, /*only_count_unplaced_users=*/true);
if (bytes_used_by_buffers == 0) {
VLOG(2) << " " << candidate_instruction->name()
<< " consumes no memory; no point in offloading.";
return {};
}
const auto [placed_uses, unplaced_uses] =
GetPlacedAndUnplacedUsers(output_buffer.users);
const Item* last_placed_user = nullptr;
const Item* first_unplaced_user = nullptr;
for (const auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
if (absl::c_find_if(placed_uses, [&](const auto& use) {
return use.user == item;
}) != placed_uses.end()) {
last_placed_user = item;
}
if (first_unplaced_user == nullptr &&
absl::c_find_if(unplaced_uses, [&](const auto& use) {
return use.user == item;
}) != unplaced_uses.end()) {
first_unplaced_user = item;
break;
}
}
if (last_placed_user == nullptr) {
VLOG(3) << " " << candidate_instruction->name()
<< " has no placed users, starting search at self.";
last_placed_user = candidate_item;
}
CHECK(first_unplaced_user != nullptr)
<< "Didn't find any unplaced user for instruction \""
<< candidate_instruction->name()
<< "\". There must be a "
"bug in how we calculate how much memory this item uses.";
float time_spent_before_next_use = 0.0;
for (auto* item = last_placed_user; item != first_unplaced_user;
item = instruction_list_.next(item)) {
time_spent_before_next_use += std::max(
0.0f, options_.hlo_cost_analysis.optimal_seconds(*item->instruction));
}
if (time_spent_before_next_use <= 0.0) {
return {};
}
const float time_spent_on_copies =
bytes_used_by_buffers / options_.host_memory_offload_config
->bandwidth_to_host_bytes_per_second +
bytes_used_by_buffers / options_.host_memory_offload_config
->bandwidth_from_host_bytes_per_second;
if (time_spent_before_next_use < time_spent_on_copies) {
return {};
}
VLOG(3) << " " << candidate_instruction->name() << " has enough time ("
<< time_spent_before_next_use
<< ") between itself and next use. The memcpy out and back will take "
<< time_spent_on_copies << "s";
return memory_limit_bytes / bytes_used_by_buffers;
}
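// Returns the cost of recomputing the candidate block later in the
// schedule, or nullopt when a placed control successor would be violated or
// no memory would be saved.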
std::optional<int64_t> MemoryUsageTracker::GetCostOfRecompute(
const std::vector<Item*>& candidate_items,
int64_t memory_limit_bytes) const {
for (auto* item : candidate_items) {
HloInstruction* candidate = item->instruction;
if (std::any_of(
candidate->control_successors().begin(),
candidate->control_successors().end(),
[this](const HloInstruction* inst) { return IsPlaced(inst); })) {
return {};
}
}
VLOG(5) << "Block contains:";
for (auto* hlo : candidate_items) {
VLOG(5) << hlo->instruction->name();
}
const int64_t memory_reduced = MemoryReducedIfRematerialized(candidate_items);
if (memory_reduced <= 0) {
return {};
}
return RematerializationCost(candidate_items, memory_reduced,
memory_limit_bytes);
}
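// Scans the express lane for candidate blocks with sizes in
// [min_block_size, max_block_size], evaluates the enabled strategies
// (compression and host offload for single-item blocks, recompute for any
// block size), and returns the lowest-cost block together with its strategy
// and the number of cost evaluations performed (the effort).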
std::tuple<std::vector<Item*>, RematStrategy, int>
MemoryUsageTracker::PickRematerializationCandidates(
const InstructionList& instruction_list, int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
int min_block_size, int max_block_size, int64_t peak_memory_bytes) {
std::vector<Item*> best_items;
int64_t best_cost = std::numeric_limits<int64_t>::max();
RematStrategy best_strategy;
int effort = 0;
VLOG(5) << "Picking candidate block with size in [" << min_block_size << ", "
<< max_block_size << "]";
for (auto* start_item = instruction_list.first_skip_node();
start_item != nullptr;
start_item = instruction_list.next_skip_node(start_item)) {
std::vector<Item*> block =
GetInitialBlock(instruction_list, *this, start_item, min_block_size);
if (block.size() < min_block_size) {
break;
}
if (AnyDenylistedOrNonRematerializable(block, rematerializable_map)) {
continue;
}
if (options_.remat_mode_config.compress && block.size() == 1) {
auto cost =
GetCostOfCompression(block[0], memory_limit_bytes, peak_memory_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kCompress on block of size " << block.size();
best_strategy.kind = RematStrategy::kCompress;
best_strategy.compact_shape =
*GetCompactShape(block[0]->instruction).value();
best_items = block;
best_cost = *cost;
}
}
if (options_.remat_mode_config.host_offload && block.size() == 1) {
auto cost = GetCostOfHostOffload(block[0], memory_limit_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kHostOffload on block of size "
<< block.size();
best_strategy.kind = RematStrategy::kHostOffload;
best_items = block;
best_cost = *cost;
}
}
if (!options_.remat_mode_config.recompute) {
continue;
}
while (block.size() <= max_block_size) {
auto cost = GetCostOfRecompute(block, memory_limit_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kRecompute on block of size "
<< block.size();
best_strategy.kind = RematStrategy::kRecompute;
best_items = block;
best_cost = *cost;
}
auto* last_item = block[block.size() - 1];
auto* next_item = instruction_list.next(last_item);
if (next_item == nullptr || next_item->denylisted || !next_item->placed ||
next_item == in_progress_item_ ||
!CanBeRematerialized(next_item->instruction, rematerializable_map)) {
break;
}
block.push_back(next_item);
}
}
return {best_items, best_strategy, effort};
}
bool MemoryUsageTracker::HasUnplacedUsers(Item* item) const {
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
for (const ItemUse& user : buffer.users) {
if (!user.user->placed) {
return true;
}
}
}
return false;
}
UsesList MemoryUsageTracker::GetItemUses(Item* item) const {
UsesList combined_users;
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
for (const ItemUse& user : buffer.users) {
combined_users.push_back(user);
}
}
return combined_users;
}
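// Rematerializes the block by cloning each instruction that still has
// unplaced users, rewiring those users to the clone (inserting
// get-tuple-element or bitcast passthroughs where shapes require it),
// updating the memory tracker and schedule, and removing an original that
// becomes dead if it is an async op, to preserve the one-start-to-one-done
// invariant. Returns the net number of instructions added.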
absl::StatusOr<int64_t> RematerializeInstructions(
MemoryUsageTracker* memory_tracker, std::vector<Item*>* best_items,
absl::flat_hash_set<const HloInstruction*>* remat_move_instructions,
InstructionList* instruction_list, HloSchedule* schedule,
HloRematerialization* rematerialization) {
int64_t net_instructions_added = 0;
std::vector<std::string> instruction_names(best_items->size());
for (int i = best_items->size() - 1; i >= 0; --i) {
Item* best_item = (*best_items)[i];
HloInstruction* best = best_item->instruction;
instruction_names[i] = best->name();
HloComputation* computation = best->parent();
if (!memory_tracker->HasUnplacedUsers(best_item)) {
continue;
}
HloCloneContext context(computation->parent());
HloInstruction* remat =
computation->AddInstruction(best->Clone("remat", &context));
for (auto& cloned_computation_pair : context.cloned_computations()) {
if (!schedule->is_computation_scheduled(cloned_computation_pair.first)) {
continue;
}
HloInstructionSequence& sequence =
schedule->GetOrCreateSequence(cloned_computation_pair.second);
HloInstructionSequence& old_sequence =
schedule->GetOrCreateSequence(cloned_computation_pair.first);
for (HloInstruction* instr : old_sequence.instructions()) {
sequence.push_back(instr);
}
}
if (DynCast<HloChannelInstruction>(best) &&
DynCast<HloChannelInstruction>(best)->channel_id()) {
remat->set_channel_id(rematerialization->NextChannelId());
}
TF_RETURN_IF_ERROR(remat->CopyAllControlDepsFrom(best));
Item* remat_item = instruction_list->CreateItem(remat);
absl::InlinedVector<Item*, 4> indirect_users;
absl::flat_hash_map<int64_t, HloInstruction*> gte_cache;
for (auto& user : memory_tracker->GetItemUses(best_item)) {
if (!memory_tracker->IsPlaced(user.user->instruction)) {
VLOG(2) << " Replacing use of " << best->name() << " in "
<< user.user->instruction->name() << " with " << remat->name();
HloInstruction* remat_use = remat;
HloInstruction* const user_operand =
user.user->instruction->mutable_operand(user.operand_number);
if (remat_use == user_operand) {
continue;
}
if (user.index && remat_use->shape() != user_operand->shape()) {
auto cached_gte = gte_cache.find(*user.index);
if (cached_gte == gte_cache.end()) {
remat_use = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(remat_use->shape(),
*user.index),
remat_use, *user.index),
"gte.remat");
indirect_users.push_back(instruction_list->CreateItem(remat_use));
gte_cache[*user.index] = remat_use;
} else {
remat_use = cached_gte->second;
}
}
if (user_operand->shape() != remat_use->shape()) {
remat_use = computation->AddInstruction(
HloInstruction::CreateBitcast(user_operand->shape(), remat_use),
"bitcast.remat");
indirect_users.push_back(instruction_list->CreateItem(remat_use));
}
TF_RETURN_IF_ERROR(user.user->instruction->ReplaceOperandWith(
user.operand_number, remat_use));
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddRematerializedInstruction(
best_item, remat_item, absl::MakeSpan(indirect_users)));
ItemList place_before;
const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
indirect_users.end());
for (auto user : remat->users()) {
if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
place_before.push_back(instruction_list->GetItem(user));
}
}
for (auto* indirect_user : indirect_users) {
for (auto user : indirect_user->instruction->users()) {
if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
place_before.push_back(instruction_list->GetItem(user));
}
}
}
for (auto* operand : remat->operands()) {
for (auto* operand_user : operand->users()) {
if (operand_user != remat) {
Item* operand_user_item = instruction_list->GetItem(operand_user);
if (!operand_user_item->placed) {
place_before.push_back(operand_user_item);
}
}
}
}
for (auto successor : remat->control_successors()) {
Item* successor_item = instruction_list->GetItem(successor);
CHECK(!successor_item->placed) << successor_item->instruction->name();
place_before.push_back(successor_item);
}
instruction_list->InsertBeforeInstructions(remat_item, place_before);
for (auto* bitcast : indirect_users) {
instruction_list->InsertBeforeInstructions(bitcast, place_before);
}
std::function<bool(HloInstruction*)> uses_empty = [&](HloInstruction* i) {
for (auto* u : i->users()) {
if (!IsSupportedIndirectUser(u) || !uses_empty(u)) {
return false;
}
}
return true;
};
if (uses_empty(best)) {
VLOG(2) << best->name() << " is now dead";
if (ContainsKey(*remat_move_instructions, best)) {
instruction_list->Denylist(remat);
}
remat_move_instructions->insert(remat);
net_instructions_added += indirect_users.size();
} else {
net_instructions_added += indirect_users.size() + 1;
}
for (auto* indirect_user : indirect_users) {
instruction_list->Denylist(indirect_user->instruction);
}
if (HloDataflowAnalysis::IsAsynchronousOperationStart(best->opcode()) ||
HloDataflowAnalysis::IsAsynchronousOperationDone(best->opcode())) {
VLOG(2) << "The old instruction " << best->name()
<< " is an async op. Removing to maintain one start to one done "
"invariant to keep the HLO valid.";
TF_RETURN_IF_ERROR(best->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation->RemoveInstruction(best));
}
}
return net_instructions_added;
}
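// Compresses best_item's output by inserting a copy to the compact shape
// immediately after it and a copy back to the original shape before the
// first unplaced user, then rewires the unplaced users to the uncompressed
// copy. Returns the number of instructions added (2).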
absl::StatusOr<int64_t> CompressInstruction(MemoryUsageTracker* memory_tracker,
Item* best_item,
const Shape& compact_shape,
InstructionList* instruction_list) {
HloInstruction* best = best_item->instruction;
VLOG(5) << "Transposing instruction " << best->name() << " (saving "
<< HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(
best_item, compact_shape))
<< ") to" << compact_shape.ToString(true);
HloComputation* computation = best->parent();
HloInstruction* compressed = computation->AddInstruction(
HloInstruction::CreateUnary(compact_shape, HloOpcode::kCopy, best),
absl::StrCat(best->name(), ".remat_compressed"));
HloInstruction* uncompressed = computation->AddInstruction(
HloInstruction::CreateUnary(best->shape(), HloOpcode::kCopy, compressed),
absl::StrCat(best->name(), ".remat_uncompressed"));
Item* compressed_item = instruction_list->CreateItem(compressed);
compressed_item->placed = true;
Item* uncompressed_item = instruction_list->CreateItem(uncompressed);
std::vector<HloInstruction*> best_users_copy = best->users();
for (HloInstruction* user : best_users_copy) {
if (!memory_tracker->IsPlaced(user)) {
VLOG(5) << " Replacing use of " << best->name() << " in " << user->name()
<< " with " << uncompressed->name();
TF_RETURN_IF_ERROR(best->ReplaceUseWith(user, uncompressed));
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddCompressInstructions(
best_item, compressed_item, uncompressed_item));
ItemList place_before;
for (auto user : uncompressed->users()) {
place_before.push_back(instruction_list->GetItem(user));
}
instruction_list->Denylist(compressed_item->instruction);
instruction_list->Denylist(uncompressed_item->instruction);
instruction_list->InsertBeforeInstructions(uncompressed_item, place_before);
instruction_list->InsertAfterInstructions(compressed_item, {best_item});
return 2;
}
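// Offloads best_item's output to host memory via a copy-start/copy-done
// pair in each direction. The copy-done to host and the copy-start back to
// device are positioned using the cost model's compute times so that each
// transfer overlaps with enough computation to hide its bandwidth-derived
// latency. Unplaced users are rewired to the final copy-done. Returns the
// number of instructions added (4).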
absl::StatusOr<int64_t> OffloadInstruction(MemoryUsageTracker* memory_tracker,
Item* best_item,
InstructionList* instruction_list) {
HloInstruction* best_instruction = best_item->instruction;
HloComputation* computation = best_instruction->parent();
VLOG(2) << "Best_instruction's users: "
<< absl::StrJoin(best_instruction->users(), ", ",
[](std::string* str, const auto* x) {
return str->append(x->name());
});
Shape instruction_shape_device = best_instruction->shape();
Shape instruction_shape_host = best_instruction->shape();
instruction_shape_host.mutable_layout()->set_memory_space(
memory_tracker->options().host_memory_offload_config->host_memory_space);
Shape context_shape = ShapeUtil::MakeShape(U32, {});
HloInstruction* copy_start_to_host =
computation->AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({instruction_shape_host,
instruction_shape_device, context_shape}),
best_instruction));
HloInstruction* copy_done_to_host =
computation->AddInstruction(HloInstruction::CreateUnary(
instruction_shape_host, HloOpcode::kCopyDone, copy_start_to_host));
HloInstruction* copy_start_to_device =
computation->AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({instruction_shape_device,
instruction_shape_host, context_shape}),
copy_done_to_host));
HloInstruction* copy_done_to_device = computation->AddInstruction(
HloInstruction::CreateUnary(instruction_shape_device,
HloOpcode::kCopyDone, copy_start_to_device));
VLOG(3) << "Created copy_start_to_host instr: "
<< copy_start_to_host->ToString();
VLOG(3) << "Created copy_done_to_host instr: "
<< copy_done_to_host->ToString();
VLOG(3) << "Created copy_start_to_device instr: "
<< copy_start_to_device->ToString();
VLOG(3) << "Created copy_done_to_device instr: "
<< copy_done_to_device->ToString();
TF_RETURN_IF_ERROR(
copy_start_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(
copy_done_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(copy_start_to_device->Visit(
&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(
copy_done_to_device->Visit(&memory_tracker->options().hlo_cost_analysis));
Item* copy_start_to_host_item =
instruction_list->CreateItem(copy_start_to_host);
Item* copy_done_to_host_item =
instruction_list->CreateItem(copy_done_to_host);
Item* copy_start_to_device_item =
instruction_list->CreateItem(copy_start_to_device);
Item* copy_done_to_device_item =
instruction_list->CreateItem(copy_done_to_device);
instruction_list->Denylist(copy_start_to_host);
instruction_list->Denylist(copy_done_to_host);
instruction_list->Denylist(copy_start_to_device);
instruction_list->Denylist(copy_done_to_device);
Item* place_before{nullptr};
{
ItemList place_before_list;
for (auto user : best_instruction->users()) {
if (user == copy_start_to_host) {
continue;
}
auto item_of_user = instruction_list->GetItem(user);
if (item_of_user->placed) {
continue;
}
place_before_list.push_back(item_of_user);
}
CHECK(!place_before_list.empty()) << "Have nothing to place this before!";
for (auto* item = instruction_list->first(); item != nullptr;
item = instruction_list->next(item)) {
if (absl::c_linear_search(place_before_list, item)) {
place_before = item;
break;
}
}
}
CHECK_NE(place_before, nullptr)
<< "Could not find an item to place this before.";
auto get_first_item_after_compute_time = [&](Item* start_item, Item* end_item,
auto successor_func,
float time_spent_on_copy) {
float time_so_far = 0.0;
auto* current_item = start_item;
while (time_so_far < time_spent_on_copy) {
auto next_item = successor_func(current_item);
if (next_item == end_item) {
LOG(WARNING) << "Didn't find enough computation before end of window";
break;
}
current_item = next_item;
CHECK_NE(current_item, nullptr) << "current_item is null";
CHECK_NE(current_item->instruction, nullptr)
<< "current_item's instruction is null";
time_so_far += std::max(
0.0f, memory_tracker->options().hlo_cost_analysis.optimal_seconds(
*current_item->instruction));
}
return current_item;
};
  const int64_t bytes_used_by_buffers = memory_tracker->BytesUsedByBuffers(
      best_item, /*only_count_unplaced_users=*/false);
const float copy_to_host_time_seconds =
bytes_used_by_buffers /
memory_tracker->options()
.host_memory_offload_config->bandwidth_to_host_bytes_per_second;
const float copy_from_host_time_seconds =
bytes_used_by_buffers /
memory_tracker->options()
.host_memory_offload_config->bandwidth_from_host_bytes_per_second;
VLOG(2) << "Item uses " << bytes_used_by_buffers << "B and will take "
<< copy_to_host_time_seconds << "s to copy to host and "
<< copy_from_host_time_seconds << "s to copy from host.";
VLOG(2) << "Inserting " << copy_start_to_host_item->instruction->name()
<< " immediately after " << best_item->instruction->name();
instruction_list->InsertAfterInstructions(copy_start_to_host_item,
{best_item});
VLOG(2) << "Inserting " << copy_done_to_device_item->instruction->name()
<< " immediately before " << place_before->instruction->name();
instruction_list->InsertBeforeInstructions(copy_done_to_device_item,
{place_before});
auto first_item_after_to_host_copy = get_first_item_after_compute_time(
copy_start_to_host_item, copy_done_to_device_item,
[&instruction_list](Item* item) { return instruction_list->next(item); },
copy_to_host_time_seconds);
VLOG(2) << "Inserting " << copy_done_to_host_item->instruction->name()
<< " immediately after "
<< first_item_after_to_host_copy->instruction->name();
instruction_list->InsertAfterInstructions(copy_done_to_host_item,
{first_item_after_to_host_copy});
auto first_item_before_from_host_copy = get_first_item_after_compute_time(
copy_done_to_device_item, copy_done_to_host_item,
[&instruction_list](Item* item) { return instruction_list->prev(item); },
copy_from_host_time_seconds);
VLOG(2) << "Inserting " << copy_start_to_device_item->instruction->name()
<< " immediately before "
<< first_item_before_from_host_copy->instruction->name();
instruction_list->InsertBeforeInstructions(
copy_start_to_device_item, {first_item_before_from_host_copy});
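// Mark the newly inserted copy instructions as placed if they fall at or
// before the current in-progress program point.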
{
auto item = instruction_list->first();
while (item != nullptr) {
if (item == copy_start_to_host_item || item == copy_done_to_host_item ||
item == copy_start_to_device_item ||
item == copy_done_to_device_item) {
item->placed = true;
} else if (memory_tracker->IsInProgressItem(item)) {
break;
}
item = instruction_list->next(item);
}
}
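// Redirect every not-yet-placed user of the offloaded value to consume the
// result of the copy back from the host.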
std::vector<HloInstruction*> best_users_copy = best_instruction->users();
for (HloInstruction* user : best_users_copy) {
if (!memory_tracker->IsPlaced(user)) {
VLOG(3) << " Replacing use of " << best_instruction->name() << " in "
<< user->name() << " with " << copy_done_to_device->name();
TF_RETURN_IF_ERROR(
best_instruction->ReplaceUseWith(user, copy_done_to_device));
} else {
VLOG(3) << user->name() << " is placed, not going to update";
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddHostOffloadCopyInstructions(
best_item, copy_start_to_host_item, copy_done_to_host_item,
copy_start_to_device_item, copy_done_to_device_item));
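// Four instructions were added: the copy-start/copy-done pair to host and the
// copy-start/copy-done pair back to device.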
return 4;
}
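// Counts of instructions touched by a single rematerialization attempt.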
struct InstructionsAdded {
int remat_count;
int net_instructions_added;
int effort;
};
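// Picks the best block of instructions with size in [min_block_size,
// max_block_size] and rematerializes it with the strategy chosen by the
// memory tracker: recomputation, compression, or host offload.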
absl::StatusOr<InstructionsAdded> RematerializeBestBlock(
int min_block_size, int max_block_size, MemoryUsageTracker* memory_tracker,
InstructionList* instruction_list, HloSchedule* schedule,
int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
absl::flat_hash_set<const HloInstruction*>* remat_move_instructions,
HloRematerialization* rematerialization) {
CHECK_GT(min_block_size, 0) << "Block size must be positive.";
std::vector<Item*> best_items;
RematStrategy best_strategy;
int effort;
std::tie(best_items, best_strategy, effort) =
memory_tracker->PickRematerializationCandidates(
*instruction_list, memory_limit_bytes, rematerializable_map,
min_block_size, max_block_size,
rematerialization->ComputationPeakMemory(
memory_tracker->computation()));
InstructionsAdded num_instructions_added;
num_instructions_added.remat_count = best_items.size();
num_instructions_added.effort = effort;
if (best_items.empty()) {
num_instructions_added.net_instructions_added = 0;
return num_instructions_added;
}
if (best_strategy.kind == RematStrategy::kCompress) {
CHECK_EQ(best_items.size(), 1)
<< "More than one instruction compressed simultaneously.";
HloInstruction* best = best_items[0]->instruction;
VLOG(1) << "Remat via compression: " << best->name() << " (saving "
<< HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(
best_items[0], best_strategy.compact_shape))
<< ")";
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
CompressInstruction(memory_tracker, best_items[0],
best_strategy.compact_shape, instruction_list));
} else if (best_strategy.kind == RematStrategy::kHostOffload) {
CHECK_EQ(best_items.size(), 1)
<< "More than one buffer offloaded simultaneously.";
VLOG(1) << "Remat via offload: " << best_items[0]->instruction->name();
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
OffloadInstruction(memory_tracker, best_items[0], instruction_list));
VLOG(4) << "Offload done, hlo computation:\n"
<< memory_tracker->computation()->ToString();
VLOG(6) << "Memory tracker:\n" << memory_tracker->ToString();
} else {
CHECK_EQ(best_strategy.kind, RematStrategy::kRecompute)
<< "Expecting strategy to be Recompute";
VLOG(1) << "Remat via recomputation: {"
<< absl::StrJoin(best_items, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< '}';
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
RematerializeInstructions(memory_tracker, &best_items,
remat_move_instructions, instruction_list,
schedule, rematerialization));
}
return num_instructions_added;
}
}
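// Computes the peak memory of the computation under the given schedule,
// including the memory used by any computations it calls.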
absl::StatusOr<int64_t> HloRematerialization::ComputePeakMemory(
const HloComputation* computation, const HloInstructionSequence& order,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
InstructionList instruction_list(order);
MemoryUsageTracker tracker(options_, computation, *points_to_analysis_,
instruction_list);
int64_t peak_memory = tracker.memory_usage();
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
const HloInstruction* instruction = item->instruction;
TF_RETURN_IF_ERROR(tracker.BeginInstruction(item));
TF_ASSIGN_OR_RETURN(
int64_t callee_usage,
CalledComputationsMemoryUsage(instruction, execution_threads));
peak_memory =
std::max<int64_t>(peak_memory, tracker.memory_usage() + callee_usage);
TF_RETURN_IF_ERROR(tracker.EndInstruction());
}
VLOG(1) << "Peak memory for " << computation->name() << ": "
<< HumanReadableNumBytes(peak_memory);
return peak_memory;
}
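// Returns the combined peak memory of the computations called by
// `instruction` in a control-flow context; embedded calls contribute zero.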
absl::StatusOr<int64_t> HloRematerialization::CalledComputationsMemoryUsage(
const HloInstruction* instruction,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
const CallSite* callsite =
call_graph_->GetNode(instruction->parent()).GetCallSite(instruction);
if (callsite == nullptr || callsite->context() == CallContext::kEmbedded) {
return 0;
}
int64_t callee_usage = 0;
for (const HloComputation* computation : callsite->called_computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads)) {
continue;
}
TF_RET_CHECK(ContainsKey(computation_peak_memory_, computation));
callee_usage += computation_peak_memory_.at(computation);
}
return callee_usage;
}
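// Rematerializes instructions in `computation` until its peak memory fits in
// memory_limit_bytes, recursing into called computations when necessary.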
absl::StatusOr<bool> HloRematerialization::RematerializeComputation(
HloComputation* computation, HloSchedule* schedule,
int64_t memory_limit_bytes, int64_t min_remat_size,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto peak_memory_usage = computation_peak_memory_.at(computation);
if (peak_memory_usage <= memory_limit_bytes) {
VLOG(1) << "Asked to rematerialize computation of size "
<< peak_memory_usage
<< " but it already fits within the given memory limit ("
<< memory_limit_bytes << ")";
return false;
}
VLOG(1) << "Rematerializing computation " << computation->name()
<< " with limit " << HumanReadableNumBytes(memory_limit_bytes);
VLOG(1) << "peak memory usage is "
<< HumanReadableNumBytes(peak_memory_usage);
CHECK(!ContainsKey(rematerialized_computations_, computation));
InstructionList instruction_list(schedule->sequence(computation));
MemoryUsageTracker memory_tracker(options_, computation, *points_to_analysis_,
instruction_list);
instruction_list.PromoteNodesToSkip([&](Item* item) {
return memory_tracker.AllocatedSize(item) >= min_remat_size;
});
bool changed = false;
absl::flat_hash_set<const HloInstruction*> remat_move_instructions;
absl::flat_hash_map<const HloInstruction*, bool> rematerializable_map;
int64_t peak_memory = memory_tracker.memory_usage();
int64_t remat_count = 0;
int64_t net_instructions_added = 0;
const CallGraphNode& call_graph_node = call_graph_->GetNode(computation);
int64_t instruction_index = 0;
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
const HloInstruction* instruction = item->instruction;
TF_ASSIGN_OR_RETURN(
int64_t callee_usage,
CalledComputationsMemoryUsage(instruction, execution_threads));
TF_RETURN_IF_ERROR(memory_tracker.BeginInstruction(item));
VLOG(2) << "Program point at " << instruction->name()
<< ", memory usage = " << memory_tracker.memory_usage()
<< ", callee usage = " << callee_usage << ", [" << instruction_index
<< "/" << instruction_list.size() << "]";
instruction_index++;
int min_block_size = 1;
int max_block_size = 1;
if (memory_tracker.AllocatedSize(item) + callee_usage > 0) {
bool is_first_phase = true;
int64_t first_phase_effort = 0;
int64_t second_phase_effort = 0;
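// While over the memory limit, rematerialize the best available block. If
// nothing could be rematerialized, widen the block-size search window
// (second phase); otherwise reset the window to single instructions.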
while (memory_tracker.memory_usage() + callee_usage >
memory_limit_bytes) {
VLOG(2) << "Over memory limit at instruction " << instruction->name()
<< ", using "
<< HumanReadableNumBytes(memory_tracker.memory_usage() +
callee_usage)
<< ", limit is " << HumanReadableNumBytes(memory_limit_bytes);
TF_ASSIGN_OR_RETURN(
InstructionsAdded instructions_added,
RematerializeBestBlock(min_block_size, max_block_size,
&memory_tracker, &instruction_list, schedule,
memory_limit_bytes, &rematerializable_map,
&remat_move_instructions, this));
net_instructions_added += instructions_added.net_instructions_added;
remat_count += instructions_added.remat_count;
if (is_first_phase) {
first_phase_effort += instructions_added.effort;
} else {
second_phase_effort += instructions_added.effort;
}
if (instructions_added.net_instructions_added > 0) {
VLOG(1) << "memory_usage after rematerialization = "
<< HumanReadableNumBytes(memory_tracker.memory_usage());
}
if (instructions_added.remat_count == 0) {
min_block_size = max_block_size + 1;
max_block_size = 2 * max_block_size;
is_first_phase = false;
} else {
max_rematerialized_block_size_ =
std::max(max_rematerialized_block_size_, max_block_size);
changed = true;
min_block_size = 1;
max_block_size = 1;
}
if (max_block_size > options_.block_size_limit ||
second_phase_effort >
options_.block_rematerialization_factor * first_phase_effort) {
break;
}
}
}
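// If this program point is still over the limit at a control-flow call site,
// recurse into the called computations with the remaining memory budget.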
const CallSite* callsite = call_graph_node.GetCallSite(instruction);
if (callsite != nullptr &&
callsite->context() == CallContext::kControlFlow &&
memory_tracker.memory_usage() + callee_usage > memory_limit_bytes) {
VLOG(1) << "Memory usage still over the limit ("
<< (memory_tracker.memory_usage() + callee_usage) << " > "
<< memory_limit_bytes
<< "). Rematerializing computations called by "
<< instruction->name();
for (HloComputation* called_computation :
callsite->called_computations()) {
if (!ContainsKey(rematerialized_computations_, called_computation) &&
HloInstruction::IsThreadIncluded(
called_computation->execution_thread(), execution_threads)) {
int64_t subcomputation_memory_limit_bytes = std::max<int64_t>(
0, memory_limit_bytes - memory_tracker.memory_usage());
TF_ASSIGN_OR_RETURN(
bool subcomputation_changed,
RematerializeComputation(called_computation, schedule,
subcomputation_memory_limit_bytes,
min_remat_size, execution_threads));
changed |= subcomputation_changed;
}
}
TF_ASSIGN_OR_RETURN(callee_usage, CalledComputationsMemoryUsage(
instruction, execution_threads));
}
peak_memory = std::max<int64_t>(
peak_memory, memory_tracker.memory_usage() + callee_usage);
VLOG(3) << "peak memory usage = " << HumanReadableNumBytes(peak_memory);
TF_RETURN_IF_ERROR(memory_tracker.EndInstruction());
}
for (auto* instruction : computation->instructions()) {
CHECK(memory_tracker.IsPlaced(instruction)) << instruction->name();
}
VLOG(1) << "In computation " << computation->name() << " rematerialized "
<< remat_count << " instructions; " << net_instructions_added
<< " net instructions added";
VLOG(1) << " peak memory usage now " << HumanReadableNumBytes(peak_memory)
<< " (was "
<< HumanReadableNumBytes(computation_peak_memory_.at(computation))
<< ")";
computation_peak_memory_.at(computation) = peak_memory;
HloInstructionSequence& sequence = schedule->GetOrCreateSequence(computation);
sequence.clear();
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
HloInstruction* instruction = item->instruction;
sequence.push_back(instruction);
}
rematerialized_computations_.insert(computation);
instructions_rematerialized_ += remat_count;
net_instructions_added_ += net_instructions_added;
return changed;
}
absl::StatusOr<bool> HloRematerialization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (options_.remat_mode_config.host_offload) {
CHECK(options_.host_memory_offload_config.has_value())
<< "Host memory config is required when host memory offload strategy "
"is specified";
}
VLOG(1) << "HloRematerialization() with memory limit of "
<< HumanReadableNumBytes(options_.memory_limit_bytes);
if (!options_.remat_mode_config.compress &&
!options_.remat_mode_config.recompute &&
!options_.remat_mode_config.host_offload) {
VLOG(1) << "All rematerialization strategies are disabled. Skipping.";
return false;
}
VLOG(2) << "HloRemat mode: compress: " << options_.remat_mode_config.compress
<< ", host_offload: " << options_.remat_mode_config.host_offload
<< ", recompute: " << options_.remat_mode_config.recompute;
XLA_VLOG_LINES(3, "Before HloRematerialization:\n" + module->ToString());
computation_peak_memory_.clear();
rematerialized_computations_.clear();
instructions_rematerialized_ = 0;
net_instructions_added_ = 0;
TF_RET_CHECK(module->has_schedule());
TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module));
next_channel_id_ = hlo_query::NextChannelId(*module);
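// The module's output buffers are live at the end of execution, so reserve
// their size out of the memory limit up front.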
int64_t module_output_size = 0;
ShapeUtil::ForEachSubshape(
module->result_shape(),
[&module_output_size, this](const Shape& subshape,
const ShapeIndex& output_index) {
module_output_size += options_.hlo_cost_analysis.GetShapeSize(subshape);
});
int64_t adjusted_memory_limit_bytes =
std::max<int64_t>(0, options_.memory_limit_bytes - module_output_size);
VLOG(1) << "Adjusted memory limit accounting for output ("
<< HumanReadableNumBytes(module_output_size)
<< "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes);
call_graph_ = CallGraph::Build(module);
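// Async computations run concurrently on other execution threads; reserve
// their peak memory, scaled by their parallelism, out of the memory limit.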
int64_t total_async_peak_memory = 0;
if (!options_.async_computation_parallelism.empty()) {
absl::flat_hash_set<std::string_view> async_threads;
for (const auto& [computation, _] :
options_.async_computation_parallelism) {
async_threads.insert(computation->execution_thread());
}
TF_RETURN_IF_ERROR(call_graph_->VisitNodes(
[this, module,
&async_threads](const CallGraphNode& node) -> absl::Status {
auto callee_thread = node.computation()->execution_thread();
if (node.context() == CallContext::kControlFlow &&
HloInstruction::IsThreadIncluded(callee_thread, async_threads)) {
TF_ASSIGN_OR_RETURN(computation_peak_memory_[node.computation()],
ComputePeakMemory(node.computation(),
module->schedule().sequence(
node.computation()),
{callee_thread}));
}
return absl::OkStatus();
},
false));
int64_t async_peak_memory = 0;
for (const auto& [entry_computation, parallel_threads] :
options_.async_computation_parallelism) {
const int64_t peak_memory =
computation_peak_memory_.at(entry_computation);
const int64_t parallel_peak_memory = peak_memory * parallel_threads;
async_peak_memory = std::max(async_peak_memory, parallel_peak_memory);
}
adjusted_memory_limit_bytes =
std::max<int64_t>(0, adjusted_memory_limit_bytes - async_peak_memory);
total_async_peak_memory += async_peak_memory;
VLOG(1) << "Adjusted memory limit accounting for async computations ("
<< HumanReadableNumBytes(async_peak_memory)
<< "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes);
computation_peak_memory_.clear();
}
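// Compute the peak memory of every computation reachable in a control-flow
// context on the included execution threads.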
TF_RETURN_IF_ERROR(call_graph_->VisitNodes(
[this, module,
&execution_threads](const CallGraphNode& node) -> absl::Status {
if (node.context() == CallContext::kControlFlow &&
HloInstruction::IsThreadIncluded(
node.computation()->execution_thread(), execution_threads)) {
TF_ASSIGN_OR_RETURN(
computation_peak_memory_[node.computation()],
ComputePeakMemory(node.computation(),
module->schedule().sequence(node.computation()),
execution_threads));
}
return absl::OkStatus();
},
false));
const int64_t before_peak_memory =
computation_peak_memory_.at(module->entry_computation()) +
module_output_size + total_async_peak_memory;
VLOG(1) << "Peak memory usage of module (before): "
<< HumanReadableNumBytes(before_peak_memory);
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(&options_.hlo_cost_analysis));
}
TF_ASSIGN_OR_RETURN(
bool changed,
RematerializeComputation(module->entry_computation(), &module->schedule(),
adjusted_memory_limit_bytes,
options_.min_remat_size, execution_threads));
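// Rematerialization can leave dead instructions behind; run DCE, then update
// the saved schedule so it no longer references deleted instructions.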
HloSchedule saved_schedule = module->schedule();
module->clear_schedule();
TF_ASSIGN_OR_RETURN(bool dead_code_removed, HloDCE().Run(module));
changed |= dead_code_removed;
TF_RETURN_IF_ERROR(saved_schedule.Update(execution_threads));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));
VLOG(1) << "Rematerialized " << instructions_rematerialized_
<< " instructions in module " << module->name() << "; "
<< net_instructions_added_ << " net instructions added";
const int64_t current_peak_memory =
computation_peak_memory_.at(module->entry_computation()) +
module_output_size + total_async_peak_memory;
VLOG(1) << "Peak memory usage of module now "
<< HumanReadableNumBytes(current_peak_memory) << " ("
<< current_peak_memory << " bytes), was "
<< HumanReadableNumBytes(before_peak_memory) << " ("
<< before_peak_memory << " bytes)";
const int64_t reduced_peak_memory = before_peak_memory - current_peak_memory;
VLOG(1) << "Reduced peak memory by "
<< HumanReadableNumBytes(reduced_peak_memory) << " ("
<< reduced_peak_memory << " bytes)";
sizes_.before_bytes = before_peak_memory;
sizes_.after_bytes = current_peak_memory;
XLA_VLOG_LINES(5, "After HloRematerialization:\n" + module->ToString());
if (current_peak_memory > options_.memory_limit_bytes) {
LOG(WARNING) << absl::StrFormat(
"Can't reduce memory use below %s (%d bytes) by rematerialization; "
"only reduced to %s (%d bytes), down from %s (%d bytes) originally",
HumanReadableNumBytes(options_.memory_limit_bytes),
options_.memory_limit_bytes, HumanReadableNumBytes(current_peak_memory),
current_peak_memory, HumanReadableNumBytes(before_peak_memory),
before_peak_memory);
}
return changed;
}
} | #include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_rematerialization_test_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
class AsyncRematerializationTest : public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(
int64_t memory_limit_bytes, HloModule* module,
const absl::flat_hash_map<HloComputation*, int64_t>&
async_computation_parallelism,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
HloRematerialization::RematerializationModeConfig config(
true, true, false);
auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
1, 1,
min_remat_size, nullptr,
std::nullopt,
async_computation_parallelism);
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module, {HloInstruction::kMainExecutionThread});
}
static constexpr int64_t kNumParallelThreads = 16;
};
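// The offload computation runs with 16-way parallelism, so its peak memory is
// scaled accordingly and reserved out of the limit, forcing the main
// computation to be rematerialized.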
TEST_F(AsyncRematerializationTest, AsyncComputation) {
constexpr std::string_view hlo = R"(
HloModule async, is_scheduled=true
%offload_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
%main_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
ENTRY %main {
%param = f32[1]{0} parameter(0)
%call-start = ((f32[1]{0}), f32[1]{0}, s32[]) call-start(f32[1]{0} %param), to_apply=%offload_computation, async_execution_thread="offload"
%call-done = f32[1]{0} call-done(((f32[1]{0}), f32[1]{0}, s32[]) %call-start)
ROOT %call = f32[1]{0} call(f32[1]{0} %call-done), to_apply=%main_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
HloInstruction* call_start = FindInstruction(module.get(), "call-start");
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloRematerialization(
kNumParallelThreads * 16 * 1024 + 14 * 1024,
module.get(),
{{call_start->async_wrapped_computation(), kNumParallelThreads}}));
EXPECT_TRUE(changed);
}
class RecomputeAndCompressHloRematerializationTest
: public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
for (const HloComputation* computation : module->computations()) {
before_computation_names_.insert(computation->name());
for (const HloInstruction* instruction : computation->instructions()) {
before_instruction_names_.insert(instruction->name());
}
}
HloRematerialization::RematerializationModeConfig config(
true, true, false);
auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
1, 1,
min_remat_size, nullptr,
std::nullopt,
{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
absl::StatusOr<bool> result = remat.Run(module);
for (const HloComputation* computation : module->computations()) {
if (!before_computation_names_.contains(computation->name())) {
continue;
}
for (const HloInstruction* instruction : computation->instructions()) {
after_instruction_names_.insert(instruction->name());
}
}
return result;
}
void CheckForRematInInstructionNames(absl::string_view test_case_name) {
constexpr absl::string_view kRematInstructionNameMustContain = ".remat";
for (const auto& instruction_name : after_instruction_names_) {
if (!before_instruction_names_.contains(instruction_name)) {
EXPECT_TRUE(absl::StrContains(instruction_name,
kRematInstructionNameMustContain))
<< "[" << test_case_name << "] Instruction \"" << instruction_name
<< "\" must contain \"" << kRematInstructionNameMustContain << "\"";
}
}
}
private:
absl::flat_hash_set<absl::string_view> before_computation_names_;
absl::flat_hash_set<absl::string_view> before_instruction_names_;
absl::flat_hash_set<absl::string_view> after_instruction_names_;
};
TEST_F(RecomputeAndCompressHloRematerializationTest, SingleComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
const HloInstruction* concat = slice->operand(0);
const HloInstruction* bcast = concat->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* remat_bcast = concat->operand(0);
EXPECT_THAT(remat_bcast, op::Broadcast(::testing::Ne(bcast)));
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 2],
concat);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 3],
remat_bcast);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
SingleComputationNoWorthRemat) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get(),
14 * 1024));
EXPECT_FALSE(changed);
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
SingleComputationNoRematerialization) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
EXPECT_EQ(computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
20 * 1024, module.get()));
EXPECT_FALSE(changed);
EXPECT_EQ(computation->instruction_count(), 8);
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematerializeAroundWhile) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* body_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".body"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond, body_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(body_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
17 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 8);
EXPECT_EQ(body_computation->instruction_count(), 8);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematerializeEntryAndWhileBody) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* body_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".body"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond, body_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(body_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
15 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(body_computation->instruction_count(), 9);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematerializeNestedComputations) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* while_cond_copy =
module->AddEmbeddedComputation(while_cond->Clone());
HloComputation* inner_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".inner"));
HloComputation* middle_computation =
module->AddEmbeddedComputation(MakeRematerializableWhileComputation(
while_cond, inner_computation,
".middle"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond_copy, middle_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(middle_computation->instruction_count(), 7);
EXPECT_EQ(inner_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
13 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(middle_computation->instruction_count(), 9);
EXPECT_EQ(inner_computation->instruction_count(), 9);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
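// Rng is side-effecting and must not be duplicated: rematerialization may
// recompute values around it, but the number of kRng instructions stays one.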
TEST_F(RecomputeAndCompressHloRematerializationTest, RngNotRematerialized) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
vec1024_shape_, RandomDistribution::RNG_UNIFORM, {param, param}));
auto tanh = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kTanh, rng));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kExp, rng));
auto add_0 = builder.AddInstruction(
HloInstruction::CreateBinary(vec1024_shape_, HloOpcode::kAdd, rng, tanh));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, rng,
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, exp, add_0))));
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, rng,
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, tanh, add_1))));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto count_rngs = [](const HloComputation* computation) {
int64_t rng_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kRng) {
++rng_count;
}
}
return rng_count;
};
ASSERT_EQ(count_rngs(entry_computation), 1);
const int64_t original_instruction_count =
entry_computation->instruction_count();
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloRematerialization(
4 * ByteSizeOf(vec1024_shape_), module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(count_rngs(entry_computation), 1);
EXPECT_GT(entry_computation->instruction_count(), original_instruction_count);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
InstructionRematerializedMultipleTimes) {
auto module = CreateNewVerifiedModule();
HloComputation* subcomputation = nullptr;
{
auto builder = HloComputation::Builder(TestName() + ".subcomputation");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},
0));
builder.AddInstruction(HloInstruction::CreateSlice(
vec1024_shape_, concat, {0},
{1024}, {1}));
subcomputation = module->AddEmbeddedComputation(builder.Build());
}
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
auto call_1 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
auto call_2 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_2}, subcomputation));
auto add_3 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_2));
auto call_3 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_3}, subcomputation));
auto add_4 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_3));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto count_broadcasts = [](const HloComputation* computation) {
int64_t bcast_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
bcast_count++;
}
}
return bcast_count;
};
EXPECT_EQ(count_broadcasts(entry_computation), 1);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(add_2->operand(0), bcast);
EXPECT_EQ(add_3->operand(0), bcast);
EXPECT_EQ(add_4->operand(0), bcast);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
22 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(count_broadcasts(entry_computation), 4);
EXPECT_EQ(entry_computation->instruction_count(), 12);
EXPECT_NE(add_2->operand(0), bcast);
EXPECT_THAT(add_2->operand(0), op::Broadcast(param));
EXPECT_NE(add_3->operand(0), bcast);
EXPECT_THAT(add_3->operand(0), op::Broadcast(param));
EXPECT_NE(add_4->operand(0), bcast);
EXPECT_THAT(add_4->operand(0), op::Broadcast(param));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
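// Verifies that kCopy instructions are not duplicated by rematerialization;
// the copy count must remain one.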
TEST_F(RecomputeAndCompressHloRematerializationTest, CopyNotRematerialized) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kCopy, param));
auto negate_a_1 = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
auto negate_a_2 = builder.AddInstruction(HloInstruction::CreateUnary(
vec1024_shape_, HloOpcode::kNegate, negate_a_1));
auto negate_b_1 = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
auto negate_b_2 = builder.AddInstruction(HloInstruction::CreateUnary(
vec1024_shape_, HloOpcode::kNegate, negate_b_1));
builder.AddInstruction(HloInstruction::CreateTuple({negate_a_2, negate_b_2}));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
1 * 1024, module.get()));
auto count_copies = [](const HloComputation* computation) {
int64_t copy_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
copy_count++;
}
}
return copy_count;
};
EXPECT_TRUE(changed);
EXPECT_EQ(count_copies(entry_computation), 1);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
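// Verifies that a broadcast can be rematerialized through a bitcast user and
// that the bitcast is recreated next to the rematerialized broadcast.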
TEST_F(RecomputeAndCompressHloRematerializationTest, ThroughBitcastRemat) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %broadcast)
%concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0}
%slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]}
%bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice,
op::Slice(op::Concatenate(op::Bitcast(op::Broadcast(_)), _)));
const HloInstruction* concat = slice->operand(0);
const HloInstruction* bcast = concat->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* remat_bitcast = concat->operand(0);
const HloInstruction* remat_broadcast = remat_bitcast->operand(0);
EXPECT_THAT(remat_broadcast, op::Broadcast(::testing::Ne(bcast)));
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 2],
concat);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 3],
remat_bitcast);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 4],
remat_broadcast);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
ThroughBitcastRematInfiniteLoop) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1024] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%broadcast2 = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast2 = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast2)
ROOT %add = f32[1024]{0} add(f32[1024]{0} %bitcast, f32[1024]{0} %bitcast2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
1024, module.get()));
ASSERT_THAT(add, op::Add(op::Bitcast(op::Broadcast(_)),
op::Bitcast(op::Broadcast(_))));
EXPECT_TRUE(changed);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShape) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::Multiply(), op::GetTupleElement(op::Fusion())));
const HloInstruction* fusion = add->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
add, op::Add(op::Multiply(), AllOf(op::Fusion(), ::testing::Ne(fusion))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShapeDoubleUse) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0
%add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Multiply(op::Add(op::Multiply(),
op::GetTupleElement(op::Fusion())),
op::GetTupleElement(op::Fusion())));
const HloInstruction* fusion = add->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
add,
op::Multiply(
op::Add(op::Multiply(), op::GetTupleElement(AllOf(
op::Fusion(), ::testing::Ne(fusion)))),
op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
EXPECT_EQ(add->operand(0)->operand(1)->operand(0),
add->operand(1)->operand(0));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematTupleShapeThroughBitcasts) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%bc.1 = f32[1024,1]{0,1} bitcast(%mul)
%bc.2 = f32[1024,1]{0,1} bitcast(%gte.2)
ROOT %add.2 = f32[1024,1]{0,1} add(f32[1024,1]{0,1} %bc.1,
f32[1024,1]{0,1} %bc.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::Bitcast(op::Multiply()),
op::Bitcast(op::GetTupleElement(op::Fusion()))));
const HloInstruction* fusion = add->operand(0)->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(add,
op::Add(op::Bitcast(op::Multiply()),
op::Bitcast(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematThroughTuple) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%tpl = (f32[1024]{0}, f32[1024]{0}) tuple(%gte.1, %add)
%bc.1 = f32[1024,1]{0,1} bitcast(%mul)
%gte.2 = f32[1024]{0} get-tuple-element(%tpl), index=0
ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %gte.2, f32[1024]{0} %add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::GetTupleElement(
op::Tuple(op::GetTupleElement(op::Fusion()), _)),
op::Add()));
const HloInstruction* tuple = add->operand(0)->operand(0);
const HloInstruction* fusion = tuple->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(add, op::Add(AllOf(op::Fusion(), ::testing::Ne(tuple),
::testing::Ne(fusion)),
op::Add()));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, AllGatherChannelId) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[256,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%ag = f32[1024,1]{1,0} all-gather(f32[256,1]{1,0} %broadcast), dimensions={0},
channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %ag)
%negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %ag)
%concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate,
f32[1024,1]{1,0} %negate), dimensions={0}
%slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate),
slice={[0:1], [0:1]}
%bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast,
f32[1]{0} %bitcast.1), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(
op::Bitcast(op::AllGather(op::Broadcast(_))), _)));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* original_ag = FindInstruction(module.get(), "ag");
const HloInstruction* remat_ag = FindInstruction(module.get(), "ag.remat");
EXPECT_NE(remat_ag, nullptr);
EXPECT_TRUE(original_ag->channel_id().has_value());
EXPECT_TRUE(remat_ag->channel_id().has_value());
EXPECT_EQ(*remat_ag->channel_id(), *original_ag->channel_id() + 1);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleArgFusion) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
%add_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = add(%p0, %p1)
}
%add_tuple_comp {
%p = (f32[1024]{0}, f32[1024]{0}) parameter(0)
%p0 = get-tuple-element(%p), index=0
%p1 = get-tuple-element(%p), index=1
ROOT %add = add(%p0, %p1)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1
%add.0 = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%add.1 = f32[1024]{0} add(f32[1024]{0} %add.0, f32[1024]{0} %broadcast.1)
%c = f32[] constant(0)
%reduce = f32[] reduce(%add.1, %c), dimensions={0}, to_apply=add_comp
%fus.1 = f32[1024]{0} fusion(%fus), kind=kLoop, calls=%add_tuple_comp
ROOT %tuple = tuple(%reduce, %fus.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Tuple(op::Reduce(), op::Fusion(op::Fusion())));
const HloInstruction* fusion1 = root->operand(1);
const HloInstruction* fusion0 = fusion1->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
root, op::Tuple(op::Reduce(),
op::Fusion(AllOf(op::Fusion(), ::testing::Ne(fusion0)))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematFusionUpdateSchedule) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%custom_call_comp {
%p = f32[1024]{0} parameter(0)
ROOT %n = f32[1024]{0} negate(p)
}
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
%c = f32[1024] custom-call(%mul), custom_call_target="SomeCall", called_computations={custom_call_comp}
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %c)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0
%add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
11 * 1024, module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* add = computation->root_instruction();
const HloInstruction* fusion = add->operand(0)->operand(0);
ASSERT_THAT(
add,
op::Multiply(
op::Add(op::Multiply(), op::GetTupleElement(AllOf(
op::Fusion(), ::testing::Ne(fusion)))),
op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
const HloInstruction* fusion0 = add->operand(0)->operand(1)->operand(0);
const HloInstruction* fusion1 = add->operand(1)->operand(0);
auto it = std::find_if(fusion0->fused_instructions().begin(),
fusion0->fused_instructions().end(),
[](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
ASSERT_NE(it, fusion0->fused_instructions().end());
auto it2 = std::find_if(fusion1->fused_instructions().begin(),
fusion1->fused_instructions().end(),
[](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
ASSERT_NE(it2, fusion1->fused_instructions().end());
EXPECT_TRUE(module->schedule().is_computation_scheduled(
(*it)->called_computations()[0]));
EXPECT_TRUE(module->schedule().is_computation_scheduled(
(*it2)->called_computations()[0]));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
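// Fixture for compression-based rematerialization: the size function pads the
// minor-most dimension to 64 elements, so transposing a rank-2 buffer to a
// compact layout can shrink its padded footprint substantially.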
class CompressingRematerializationTest : public RematerializationTestBase {
protected:
static int64_t ShapeSizePadMinorTo64(const Shape& shape) {
if (shape.IsTuple()) {
return 4;
}
Shape descending_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
int64_t size =
ShapeUtil::ByteSizeOfPrimitiveType(descending_shape.element_type());
for (int64_t i = 0; i < descending_shape.rank(); ++i) {
int64_t dim = descending_shape.dimensions(i);
if (i == descending_shape.rank() - 1) {
dim = RoundUpTo<int64_t>(dim, 64);
}
size *= dim;
}
return size;
}
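// Returns a layout for rank-2 shapes that places the larger dimension
// minor-most, minimizing the padding added by ShapeSizePadMinorTo64.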
static absl::StatusOr<Shape> ChooseCompactLayoutForShape(const Shape& shape) {
if (shape.rank() != 2) {
return shape;
}
Shape result = shape;
Layout layout = result.layout();
int64_t most_minor_index = layout.minor_to_major()[0];
int64_t second_minor_index = layout.minor_to_major()[1];
int64_t most_minor = result.dimensions(most_minor_index);
int64_t second_minor = result.dimensions(second_minor_index);
if (most_minor < second_minor) {
Layout new_layout = layout;
new_layout.set_minor_to_major(0, second_minor_index);
new_layout.set_minor_to_major(1, most_minor_index);
*result.mutable_layout() = new_layout;
}
return result;
}
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
HloRematerialization::RematerializationModeConfig config(
false, true, false);
auto shape_size_func = [](const Shape& shape) {
return ShapeSizePadMinorTo64(shape);
};
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
1, 1,
min_remat_size, ChooseCompactLayoutForShape,
std::nullopt,
{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module);
}
};
TEST_F(CompressingRematerializationTest, OnlyRematBigBuffer) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%broadcast.1 = f32[10,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.2 = f32[] reduce(f32[10,2]{1,0} %broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
ROOT %add.2 = f32[] add(f32[] %add, f32[] %reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
                                            /*memory_limit_bytes=*/30 * 1024,
                                            module.get(),
                                            /*min_remat_size=*/10 * 1024));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* broadcast_2 =
module->entry_computation()->GetInstructionWithName("broadcast.1");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
HloInstruction* reduce_2 =
module->entry_computation()->GetInstructionWithName("reduce.2");
EXPECT_THAT(reduce,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
EXPECT_THAT(reduce_2, op::Reduce(broadcast_2, op::Constant()));
}
TEST_F(CompressingRematerializationTest, SingleRemat) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/30 * 1024, module.get()));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
EXPECT_THAT(reduce,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
}
TEST_F(CompressingRematerializationTest, AvoidPathologicalCompress) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[63,60]{1,0} broadcast(f32[] %param.0), dimensions={}
%broadcast.1 = f32[16,64]{1,0} broadcast(f32[] %param.0), dimensions={}
%reduce.0 = f32[] reduce(%broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(%broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/16 * 1024, module.get()));
EXPECT_FALSE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
EXPECT_THAT(reduce, op::Reduce(broadcast, op::Constant()));
}
TEST_F(CompressingRematerializationTest, AllUsersUseSameCopy) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.2 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
%reduce.3 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add.2 = f32[] add(f32[] %reduce.2, f32[] %reduce.3)
ROOT %tuple = (f32[], f32[]) tuple (f32[] add, f32[] add.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/30 * 1024, module.get()));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce_2 =
module->entry_computation()->GetInstructionWithName("reduce.2");
HloInstruction* reduce_3 =
module->entry_computation()->GetInstructionWithName("reduce.3");
EXPECT_THAT(reduce_2,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
EXPECT_THAT(reduce_3,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
}
class OffloadingRematerializationTest : public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
HloCostAnalysis::Options hlo_cost_analysis_options;
hlo_cost_analysis_options.shape_size = [](const Shape& shape) {
return ByteSizeOf(shape);
};
hlo_cost_analysis_options.set_flops_per_second(flops_per_second_);
hlo_cost_analysis_options.set_transcendentals_per_second(
transcendentals_per_second_);
HloCostAnalysis cost_analysis(hlo_cost_analysis_options);
    HloRematerialization::RematerializationModeConfig config(
        /*recompute=*/false, /*compress=*/false, /*host_offload=*/true);
HloRematerialization::HostMemoryOffloadConfig host_memory_offload_config(
kHostMemorySpaceColor, copy_to_host_speed_, copy_from_host_speed_);
    HloRematerialization::Options options(
        cost_analysis, config, memory_limit_bytes,
        /*block_size_limit=*/1, /*block_rematerialization_factor=*/1,
        min_remat_size, /*compact_shape_function=*/nullptr,
        host_memory_offload_config,
        /*async_computation_parallelism=*/{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module);
}
void SetCopyToHostSpeed(float val) { copy_to_host_speed_ = val; }
void SetCopyFromHostSpeed(float val) { copy_from_host_speed_ = val; }
void SetFlopsPerSecond(float val) { flops_per_second_ = val; }
void SetTranscendentalsPerSecond(float val) {
transcendentals_per_second_ = val;
}
  static constexpr int64_t kHostMemorySpaceColor{5};
private:
float copy_to_host_speed_{1.0f};
float copy_from_host_speed_{1.0f};
float flops_per_second_{1.0f};
float transcendentals_per_second_{1.0f};
};
TEST_F(OffloadingRematerializationTest, BasicSuccessfulHostOffload) {
const std::string& hlo_string = R"(
HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}
ENTRY MyModule {
param_0 = f32[1024]{0} parameter(0)
param_1 = f32[1024]{0} parameter(1)
res_3 = f32[1024]{0} add(param_0, param_1)
res_4 = f32[1024]{0} tanh(res_3)
res_5 = f32[1024]{0} tanh(res_4)
res_6 = f32[1024]{0} tanh(res_5)
res_7 = f32[1024]{0} add(res_6, res_6)
res_8 = f32[1024]{0} add(res_7, res_5)
res_9 = f32[1024]{0} add(res_8, res_4)
res_10 = f32[1024]{0} add(res_9, res_3)
ROOT res_11 = f32[1024]{0} tanh(res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SetCopyToHostSpeed(4.0 * 1024);
SetCopyFromHostSpeed(4.0 * 1024);
SetFlopsPerSecond(2 * 1024);
SetTranscendentalsPerSecond(2 * 1024);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/10 * 1024, module.get()));
ASSERT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());
auto res_3_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_3_matcher));
auto res_4_matcher = op::Tanh(res_3_matcher);
auto res_4_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_4_matcher));
auto res_5_matcher = op::Tanh(res_4_matcher);
auto res_6_matcher = op::Tanh(res_5_matcher);
auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);
auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);
auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);
auto res_10_matcher = op::Add(res_9_matcher, res_3_rematted_matcher);
const auto instruction_sequence =
module->schedule().sequence(module->entry_computation());
ASSERT_THAT(instruction_sequence.instructions().back(),
op::Tanh(res_10_matcher));
}
TEST_F(OffloadingRematerializationTest, SkipOffloadWhenBitcastIsInvolved) {
const std::string& hlo_string = R"(
HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}
ENTRY MyModule {
param_0 = f32[1024]{0} parameter(0)
param_1 = f32[1024]{0} parameter(1)
res_3 = f32[1024]{0} add(param_0, param_1)
bitcast = f32[1024]{0} bitcast(res_3)
res_4 = f32[1024]{0} tanh(res_3)
res_5 = f32[1024]{0} tanh(res_4)
res_6 = f32[1024]{0} tanh(res_5)
res_7 = f32[1024]{0} add(res_6, res_6)
res_8 = f32[1024]{0} add(res_7, res_5)
res_9 = f32[1024]{0} add(res_8, res_4)
res_10 = f32[1024]{0} add(res_9, bitcast)
ROOT res_11 = f32[1024]{0} tanh(res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SetCopyToHostSpeed(4.0 * 1024);
SetCopyFromHostSpeed(4.0 * 1024);
SetFlopsPerSecond(2 * 1024);
SetTranscendentalsPerSecond(2 * 1024);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/10 * 1024, module.get()));
ASSERT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());
auto res_4_matcher = op::Tanh(res_3_matcher);
auto res_4_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_4_matcher));
auto res_5_matcher = op::Tanh(res_4_matcher);
auto res_6_matcher = op::Tanh(res_5_matcher);
auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);
auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);
auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);
auto res_10_matcher = op::Add(res_9_matcher, op::Bitcast(res_3_matcher));
const auto instruction_sequence =
module->schedule().sequence(module->entry_computation());
ASSERT_THAT(instruction_sequence.instructions().back(),
op::Tanh(res_10_matcher));
}
class IndirectUseTest : public RecomputeAndCompressHloRematerializationTest,
public ::testing::WithParamInterface<bool> {};
TEST_P(IndirectUseTest, IndirectUseRematerialized) {
const bool indirectly_used = GetParam();
auto module = CreateNewVerifiedModule();
HloComputation* subcomputation = nullptr;
{
auto builder = HloComputation::Builder(TestName() + ".subcomputation");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},
0));
builder.AddInstruction(HloInstruction::CreateSlice(
vec1024_shape_, concat, {0},
{1024}, {1}));
subcomputation = module->AddEmbeddedComputation(builder.Build());
}
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
auto call_1 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bcast, add_2}));
auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
vec1024_shape_, tuple, indirectly_used ? 0 : 1));
builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, gte));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_EQ(entry_computation->instruction_count(), 8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              /*memory_limit_bytes=*/22 * 1024, module.get()));
if (indirectly_used) {
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 3);
} else {
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
}
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
INSTANTIATE_TEST_SUITE_P(IndirectUseTestInstantiation, IndirectUseTest,
::testing::Values(true, false));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c6920cc-3aba-4b4e-867d-febd43cab405 | cpp | tensorflow/tensorflow | llvm_compiler | third_party/xla/xla/service/llvm_compiler.cc | third_party/xla/xla/tests/llvm_compiler_test.cc | #include "xla/service/llvm_compiler.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/service/executable.h"
#include "xla/service/stream_pool.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#ifdef __FAST_MATH__
#error "Don't build XLA with -ffast-math"
#endif
namespace xla {
absl::StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile(
std::unique_ptr<HloModuleGroup> module_group,
std::vector<std::vector<se::StreamExecutor*>> stream_execs,
const CompileOptions& options) {
tsl::port::ScopedDontFlushDenormal dont_flush_denormals;
std::vector<std::unique_ptr<Executable>> result;
std::vector<std::unique_ptr<HloModule>> modules =
module_group->ConsumeModules();
for (size_t i = 0; i < modules.size(); i++) {
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
modules[i]->name(), modules[i]->unique_id());
}};
TF_ASSIGN_OR_RETURN(modules[i], RunHloPasses(std::move(modules[i]),
stream_execs[i][0], options));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> executable,
RunBackend(std::move(modules[i]), stream_execs[i][0], options));
result.push_back(std::move(executable));
}
return std::move(result);
}
} | #include "xla/service/llvm_compiler.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/Module.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/literal_util.h"
#include "xla/service/backend.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using LLVMCompilerTest = HloTestBase;
const char* const kHloText = R"(
HloModule Add
ENTRY main {
constant.0 = f32[] constant(42.0)
constant.1 = f32[] constant(43.0)
ROOT add.0 = f32[] add(constant.0, constant.1)
}
)";
TEST_F(LLVMCompilerTest, HooksTest) {
int pre_opt_hook_call_count = 0;
int post_opt_hook_call_count = 0;
auto pre_opt_hook = [&pre_opt_hook_call_count](const llvm::Module&) {
++pre_opt_hook_call_count;
return absl::OkStatus();
};
auto post_opt_hook = [&post_opt_hook_call_count](const llvm::Module&) {
++post_opt_hook_call_count;
return absl::OkStatus();
};
auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
LLVMCompiler* compiler =
tensorflow::down_cast<xla::LLVMCompiler*>(backend().compiler());
compiler->SetPreOptimizationHook(pre_opt_hook);
compiler->SetPostOptimizationHook(post_opt_hook);
  ASSERT_TRUE(compiler
                  ->RunBackend(std::move(hlo_module),
                               backend().default_stream_executor(),
                               /*device_allocator=*/nullptr)
                  .ok());
EXPECT_EQ(1, pre_opt_hook_call_count);
EXPECT_EQ(1, post_opt_hook_call_count);
}
TEST_F(LLVMCompilerTest, DISABLED_MultiModuleCompilation) {
auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
auto hlo_module2 = ParseAndReturnVerifiedModule(kHloText).value();
std::vector<std::unique_ptr<HloModule>> modules;
modules.push_back(std::move(hlo_module));
modules.push_back(std::move(hlo_module2));
auto module_group =
std::make_unique<HloModuleGroup>("test_module_group", std::move(modules));
std::vector<std::vector<se::StreamExecutor*>> executors;
executors.push_back({backend().default_stream_executor()});
executors.push_back({backend().default_stream_executor()});
EXPECT_IS_OK(backend().compiler()->Compile(std::move(module_group),
std::move(executors),
backend().memory_allocator()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/llvm_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
134981e1-f110-441e-b364-4076f0bc05ec | cpp | tensorflow/tensorflow | gpu_p2p_pipeliner | third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc | third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc | #include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/hlo_parser.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
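// Returns true if `instr` is a SendDone/RecvDone that carries the send-recv
// pipeline frontend attribute and is safe to pipeline: it must have no
// control successors, its only permitted control predecessor is the Send
// paired with a RecvDone, and it must not already feed the surrounding
// computation's root (i.e. not already be pipelined).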
bool ShouldPipeline(const HloInstruction* instr) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return false;
}
auto it = instr->frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it == instr->frontend_attributes().map().end()) {
return false;
}
auto allowed_predecessor = [&]() {
return instr->opcode() == HloOpcode::kRecvDone &&
instr->control_predecessors().size() == 1 &&
instr->control_predecessors()[0]->opcode() == HloOpcode::kSend;
};
if (!instr->control_successors().empty() ||
(!instr->control_predecessors().empty() && !allowed_predecessor())) {
return false;
}
bool is_pipelined =
(instr->user_count() == 1 && instr->parent() != nullptr &&
instr->users()[0] == instr->parent()->root_instruction());
return !is_pipelined;
}
bool ShouldAllowLoopVariantParameterInChain(const HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->opcode() == HloOpcode::kParameter);
return true;
}
absl::Status PostprocessP2PImpl(
HloInstruction* instr,
std::function<std::string(std::vector<ReplicaGroup>&)> transformer) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return Internal("Expected SendDone/RecvDone as the pipelined collective");
}
instr = instr->mutable_operand(0);
if (!HloPredicateIsOp<HloOpcode::kRecv, HloOpcode::kSend>(instr)) {
return Internal("Expected Send/Recv as the SendDone/RecvDone operand");
}
auto validation_it =
instr->frontend_attributes().map().find(kSendRecvValidationAttr);
if (validation_it == instr->frontend_attributes().map().end() ||
validation_it->second == "invalid") {
return absl::OkStatus();
}
auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
if (!statusor_bounds.ok()) {
return statusor_bounds.status();
}
std::string validation_attr = transformer(statusor_bounds.value());
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kSendRecvValidationAttr] = validation_attr;
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
}
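// Rewrites the _xla_send_recv_validation bounds for the peeled (pre-loop)
// instance of a pipelined Send/Recv: any iteration interval that covers
// iteration 0 becomes {0,0}; all other intervals become the empty {1,0},
// and if every interval is empty the attribute is set to "invalid".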
absl::Status PostprocessPeeledP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= 0 && upper_bound >= 0) {
all_invalid = false;
bounds.push_back({0, 0});
} else {
bounds.push_back({1, 0});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
}
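// Rewrites the validation bounds for the rotated in-loop Send/Recv: with the
// first iteration peeled off, each interval is shifted down by one (clamped
// at zero); intervals that become empty are marked {1,0}, and "invalid" is
// used when none remain.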
absl::Status PostprocessRotatedP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= upper_bound) {
if (lower_bound >= 1) {
--lower_bound;
}
if (upper_bound >= 1) {
--upper_bound;
}
if (lower_bound <= upper_bound) {
all_invalid = false;
bounds.push_back({lower_bound, upper_bound});
} else {
bounds.push_back({1, 0});
}
} else {
bounds.push_back({lower_bound, upper_bound});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
}
}
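// Appends a CollectivePipeliner pass that rotates pipelined Send/Recv chains
// backward out of while loops and fixes up their send-recv validation
// attributes for the peeled and rotated instances (see the postprocessing
// helpers above).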
void AddP2PPipeliner(HloPassPipeline& pipeline) {
  CollectivePipeliner::Config config{
      /*level_to_operate_on=*/0,
      /*max_pipelining_per_loop=*/INT64_MAX,
      /*last_run=*/true,
      /*pipeline_use_tree=*/false,
      /*process_different_sized_ops=*/true,
      /*pipelining_direction=*/
      CollectivePipeliner::PipeliningDirection::kBackward,
      /*should_process=*/ShouldPipeline,
      /*acceptable_formatting=*/HloPredicateTrue,
      /*reuse_pipelined_op_buffer=*/HloPredicateTrue,
      /*should_allow_loop_variant_parameter_in_chain=*/
      ShouldAllowLoopVariantParameterInChain,
      /*should_allow_control_dependencies=*/true,
      /*postprocess_backward_peeled_op=*/PostprocessPeeledP2P,
      /*postprocess_backward_rotated_op=*/PostprocessRotatedP2P};
pipeline.AddPass<CollectivePipeliner>(config);
}
}
} | #include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
class GpuP2PPipelinerTest : public HloTestBase {
public:
GpuP2PPipelinerTest() {
const int64_t kNumReplicas = 1;
const int64_t kNumComputations = 4;
    config_ = GetModuleConfigForTest(/*replica_count=*/kNumReplicas,
                                     /*num_partitions=*/kNumComputations);
}
absl::StatusOr<bool> RunOptimizer(HloModule* module) {
HloPassPipeline pipeline("optimizer");
    pipeline.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                                  /*allow_mixed_precision=*/false);
    AddP2PPipeliner(pipeline);
    pipeline.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                                  /*allow_mixed_precision=*/false);
return pipeline.Run(module);
}
protected:
HloModuleConfig config_;
};
TEST_F(GpuP2PPipelinerTest,
TransformRecvSendBackwardsWithMetaDataPostProcessing) {
const char* kHloStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
after-all.0.s = token[] after-all()
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}, control-predecessors={send.0}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
EXPECT_TRUE(RunOptimizer(module.get()).value());
XLA_VLOG_LINES(10, module->ToString());
auto while_op = FindInstruction(module.get(), "while");
EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
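  // The original validation bounds {1,7} exclude iteration 0, so the peeled
  // (pre-loop) send/recv must never run ("invalid"); the rotated in-loop
  // pair gets the bounds shifted down by one iteration, i.e. {0,6}.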
const char* kPeeledAttr = "_xla_send_recv_validation=\"invalid\"";
const char* kRotatedAttr = "_xla_send_recv_validation={{0,6}}";
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));
}
TEST_F(GpuP2PPipelinerTest, SendRecvForwardCycle) {
const char* kHloStr = R"(
HloModule test
while_body {
inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)
iter = u32[] get-tuple-element(inputs), index=0
iter_increment = u32[] constant(1)
next_iter = u32[] add(iter, iter_increment)
weights = f32[2,2] get-tuple-element(inputs), index=2
partition-id = u32[] partition-id()
zero = u32[] constant(0)
compare = pred[] compare(partition-id, zero), direction=EQ
broadcast = pred[2,2] broadcast(compare), dimensions={}
data = f32[2,2] get-tuple-element(inputs), index=1
after-all = token[] after-all()
send = (f32[2,2], u32[], token[]) send(data, after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0",
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_validation="{{3,10}}"
}
recv = (f32[2,2], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0",
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_validation="{{3,10}}"
}
recv-done = (f32[2,2], token[]) recv-done(recv), channel_id=1,
frontend_attributes={_xla_send_recv_pipeline="0"}, control-predecessors={send}
recv-done-data = f32[2,2] get-tuple-element(recv-done), index=0
after-all.1 = token[] after-all()
send.1 = (f32[2,2], u32[], token[]) send(data, after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1",
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_validation="{{0,7},{1,8},{2,9}}"
}
recv.1 = (f32[2,2], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1",
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_validation="{{0,7},{1,8},{2,9}}"
}
recv-done.1 = (f32[2,2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={_xla_send_recv_pipeline="1"}, control-predecessors={send.1}
recv-done-1-data = f32[2,2] get-tuple-element(recv-done.1), index=0
select = f32[2,2] select(broadcast, recv-done-data, recv-done-1-data)
matmul = f32[2,2] dot(weights, select),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights)
send-done = token[] send-done(send), channel_id=1,
frontend_attributes={_xla_send_recv_pipeline="0"}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={_xla_send_recv_pipeline="1"}
}
while_cond {
inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)
iter = u32[] get-tuple-element(inputs), index=0
max_iter = u32[] constant(3)
ROOT compare = pred[] compare(iter, max_iter), direction=LT
}
ENTRY test_computation {
start_iter = u32[] constant(0)
input_data = f32[2,2] parameter(0)
input_weights = f32[2,2] parameter(1)
input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data, input_weights)
while_result = (u32[], f32[2,2], f32[2,2]) while(input), condition=while_cond, body=while_body
ROOT data_out = f32[2,2] get-tuple-element(while_result), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
EXPECT_TRUE(RunOptimizer(module.get()).value());
EXPECT_TRUE(RunFileCheck(module->ToString(), R"(
CHECK: %[[RECV_BWD_START:.*]] = {{.*}} after-all()
CHECK: %[[RECV_BWD:.*]] = {{.*}} recv(token[] %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}}
CHECK: %[[RECV_DONE_BWD:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[RECV_FWD_START:.*]] = {{.*}} after-all()
CHECK: %[[RECV_FWD:.*]] = {{.*}} recv(token[] %[[RECV_FWD_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}}
CHECK: %[[RECV_DONE_FWD:.*]] = {{.*}} recv-done((f32[2,2]{1,0}, u32[], token[]) %[[RECV_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %[[SEND_BWD:.*]] = {{.*}} send({{.*}} %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}}
CHECK: %[[SEND_DONE_BWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[SEND_FWD:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}}
CHECK: %[[SEND_DONE_FWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %[[WHILE_COND:.*]] (cond_param: {{.*}}
CHECK-NEXT: %[[COND_PARAM:.*]] = {{.*}} parameter(0)
CHECK: %[[CURRENT_ITER:.*]] = {{.*}} get-tuple-element({{.*}} %[[COND_PARAM:.*]]), index=0
CHECK: %[[TWO:.*]] = {{.*}} constant(2)
CHECK: ROOT %[[COMPARE:.*]] = pred[] compare({{.*}} %[[CURRENT_ITER:.*]], {{.*}} %[[TWO:.*]]), direction=LT
CHECK: ENTRY %[[TEST_COMPUTATION:.*]] (input_data: {{.*}}
CHECK: %[[RECV_BWD_DUMMY_START:.*]] = {{.*}} after-all()
CHECK: %[[RECV_BWD_DUMMY:.*]] = {{.*}} recv(token[] %[[RECV_BWD_DUMMY_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation="invalid"}
CHECK: %[[RECV_DONE_BWD_DUMMY:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[RECV_FWD_FIRST_ITER_START:.*]] = {{.*}} after-all()
CHECK: %[[RECV_FWD_FIRST_ITER:.*]] = {{.*}} recv(token[] %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}}
CHECK: %[[RECV_DONE_FWD_FIRST_ITER:.*]] = {{.*}} recv-done({{.*}} %[[RECV_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %[[SEND_BWD_DUMMY:.*]] = {{.*}} send({{.*}} %[[RECV_DUMMY_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation="invalid"}
CHECK: %[[SEND_DONE_BWD_DUMMY:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[SEND_FWD_FIRST_ITER:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}}
CHECK: %[[SEND_DONE_FWD_FIRST_ITER:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %[[START_LOOP_FROM_ITER_ONE:.*]] = u32[] constant(1)
CHECK: %[[LOOP_INPUT:.*]] = {{.*}} tuple({{.*}} %[[START_LOOP_FROM_ITER_ONE:.*]])
CHECK: %[[WHILE:.*]] = {{.*}} while({{.*}} %[[LOOP_INPUT:.*]]), {{.*}}
)")
.value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
25f2c367-30d8-42b1-ae1a-153d697a43ab | cpp | tensorflow/tensorflow | gpu_float_support | third_party/xla/xla/service/gpu/gpu_float_support.cc | third_party/xla/xla/service/gpu/gpu_float_support_test.cc | #include "xla/service/gpu/gpu_float_support.h"
#include <utility>
#include <variant>
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
bool GpuFloatSupport::SupportsMixedPrecisions(const HloInstruction& hlo) const {
if (FloatSupport::SupportsMixedPrecisions(hlo)) return true;
switch (hlo.opcode()) {
case HloOpcode::kDot: {
CHECK_GE(hlo.operand_count(), HloDotInstruction::kOperands);
const PrimitiveType lhs_type = hlo.operand(0)->shape().element_type();
const PrimitiveType rhs_type = hlo.operand(1)->shape().element_type();
const PrimitiveType result_type = hlo.shape().element_type();
return (lhs_type == F16 && rhs_type == F16 && result_type == F32) ||
(lhs_type == BF16 && rhs_type == BF16 && result_type == F32);
}
default:
return false;
}
}
bool GpuFloatSupport::IsSupported(const HloInstruction& hlo) const {
switch (hlo.opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kReduceScatter:
case HloOpcode::kDot:
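      // For these ops an FP8 low-precision type is only left unconverted when
      // the computation is a Triton fusion on a new enough CUDA GPU
      // (F8E4M3FN from Ampere, F8E5M2 from Hopper); otherwise only BF16
      // survives normalization here.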
using TypeAndCC = std::pair<
PrimitiveType,
stream_executor::CudaComputeCapability::CudaComputeCapabilities>;
for (auto [type, cc] :
{TypeAndCC(F8E4M3FN, se::CudaComputeCapability::AMPERE),
TypeAndCC(F8E5M2, se::CudaComputeCapability::HOPPER)}) {
if (LowPrecisionType() == type) {
auto* cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&compute_capability_);
return cuda_compute_capability &&
cuda_compute_capability->IsAtLeast(cc) &&
IsTritonFusedComputation(*hlo.parent());
}
}
return LowPrecisionType() == BF16;
case HloOpcode::kAllGather:
case HloOpcode::kAllToAll:
case HloOpcode::kBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kConcatenate:
case HloOpcode::kCopy:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kBitcast:
return true;
case HloOpcode::kAdd:
case HloOpcode::kSubtract:
case HloOpcode::kMultiply: {
if (LowPrecisionType() == BF16) {
auto* cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&compute_capability_);
return cuda_compute_capability != nullptr &&
cuda_compute_capability->IsAtLeastHopper();
}
return false;
}
default:
return false;
}
}
}
} | #include "xla/service/gpu/gpu_float_support.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_normalization.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
namespace {
class FloatSupportTest : public HloTestBase {
protected:
FloatSupportTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool Normalize(HloModule* module, se::GpuComputeCapability cc,
PrimitiveType low_precision_type,
PrimitiveType high_precision_type) {
GpuFloatSupport float_support(cc, low_precision_type, high_precision_type);
FloatNormalization normalization(&float_support);
absl::StatusOr<bool> result = normalization.Run(module);
EXPECT_IS_OK(result.status());
    HloVerifier verifier(/*layout_sensitive=*/false,
                         /*allow_mixed_precision=*/true);
EXPECT_IS_OK(verifier.Run(module).status());
return result.value();
}
std::unique_ptr<HloComputation> CreateComputation(PrimitiveType lhs_type,
PrimitiveType rhs_type,
PrimitiveType result_type) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});
Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});
Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "b"));
PrecisionConfig precision_config;
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(1);
builder.AddInstruction(HloInstruction::CreateDot(
result_shape, a, b, dot_dnums, precision_config));
return builder.Build();
}
void TestDotConversion(PrimitiveType lhs_type, PrimitiveType rhs_type,
PrimitiveType result_type, se::GpuComputeCapability cc,
bool should_convert_lhs, bool should_convert_rhs,
PrimitiveType low_precision_type,
PrimitiveType high_precision_type = F16) {
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(
CreateComputation(lhs_type, rhs_type, result_type));
EXPECT_EQ(
Normalize(module.get(), cc, low_precision_type, high_precision_type),
should_convert_lhs || should_convert_rhs);
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);
EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==
HloOpcode::kConvert,
should_convert_lhs);
EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==
HloOpcode::kConvert,
should_convert_rhs);
}
void TestTritonFusedDot(PrimitiveType lhs_type, PrimitiveType rhs_type,
PrimitiveType result_type,
se::GpuComputeCapability cc, bool should_convert_lhs,
bool should_convert_rhs,
PrimitiveType low_precision_type,
PrimitiveType high_precision_type = F16) {
auto module = CreateNewVerifiedModule();
    auto computation = module->AddComputationAndUnifyNamesAndIds(
        CreateComputation(lhs_type, rhs_type, result_type),
        /*is_entry=*/false);
Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});
Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});
Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});
auto builder = HloComputation::Builder("main");
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "b"));
HloInstruction* fusion =
builder.AddInstruction(HloInstruction::CreateFusion(
result_shape, HloInstruction::FusionKind::kCustom, {a, b},
computation));
GpuBackendConfig config;
config.mutable_fusion_backend_config()->set_kind(
std::string(kTritonGemmFusionKind));
CHECK_OK(fusion->set_backend_config(config));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
Normalize(module.get(), cc, low_precision_type, high_precision_type),
should_convert_lhs || should_convert_rhs);
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);
EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==
HloOpcode::kConvert,
should_convert_lhs);
EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==
HloOpcode::kConvert,
should_convert_rhs);
}
};
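// In the calls below, the two adjacent bare booleans passed to
// TestDotConversion/TestTritonFusedDot are should_convert_lhs and
// should_convert_rhs, matching the helper signatures above.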
TEST_F(FloatSupportTest, ShouldAlwaysConvertFp8Dot) {
TestDotConversion(F8E4M3FN, F8E4M3FN, F16,
se::CudaComputeCapability::Hopper(),
true,
true, F8E4M3FN);
TestDotConversion(F8E4M3FN, F8E4M3FN, F32,
se::CudaComputeCapability::Hopper(),
true,
true, F8E4M3FN);
TestDotConversion(F8E4M3FN, F8E4M3FN, F16,
se::CudaComputeCapability::Ampere(),
true,
true, F8E4M3FN);
TestDotConversion(F8E4M3FN, F8E4M3FN, F32,
se::CudaComputeCapability::Hopper(),
true,
true, F8E4M3FN);
TestDotConversion(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),
true,
true, F8E5M2);
TestDotConversion(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),
true,
true, F8E5M2);
TestDotConversion(F8E5M2, F8E4M3FN, F16, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
TestDotConversion(F8E5M2, F8E4M3FN, F32, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
TestDotConversion(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
TestDotConversion(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
}
TEST_F(FloatSupportTest, ShouldConvertTritonUnsupportedFp8Dot) {
TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,
se::CudaComputeCapability::Hopper(),
true,
true, F8E4M3FN);
TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,
se::CudaComputeCapability::Hopper(),
false,
false, F8E4M3FN);
TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,
se::CudaComputeCapability::Ampere(),
true,
true, F8E4M3FN);
TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,
se::CudaComputeCapability::Hopper(),
false,
false, F8E4M3FN);
TestTritonFusedDot(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),
true,
true, F8E5M2);
TestTritonFusedDot(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),
true,
true, F8E5M2);
TestTritonFusedDot(F8E5M2, F8E4M3FN, F16, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
TestTritonFusedDot(F8E5M2, F8E4M3FN, F32, se::CudaComputeCapability::Hopper(),
false,
false, F8E5M2);
TestTritonFusedDot(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
TestTritonFusedDot(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),
true,
false, F8E5M2);
}
TEST_F(FloatSupportTest, ShouldKeepBf16OnAmpere) {
TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Ampere(),
false,
false, BF16);
}
TEST_F(FloatSupportTest, ShouldKeepBf16OnHopper) {
TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Hopper(),
false,
false, BF16);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
914cb876-e268-4338-974a-bdc42eb9ee1a | cpp | tensorflow/tensorflow | triton_tiling_propagation | third_party/xla/xla/service/gpu/triton_tiling_propagation.cc | third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
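// Drops dimensions whose iteration spec is a single fragment of count 1;
// such dimensions do not affect the physical iteration pattern and are
// ignored when comparing specs for physical equivalence.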
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
FilterTrivialDims(
const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&
dim_iter_specs) {
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
non_trivial_dim_iteration_specs;
for (const auto& [dim, dim_spec] : dim_iter_specs) {
if (dim_spec.size() == 1 && dim_spec[0].count == 1) {
continue;
}
non_trivial_dim_iteration_specs[dim] = dim_spec;
}
return non_trivial_dim_iteration_specs;
}
}
const TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(
const int dimension) const {
if (auto it = dim_iteration_specs_.find(dimension);
it != dim_iteration_specs_.end()) {
return &it->second;
}
return nullptr;
}
std::vector<int> TensorIterationSpec::GetDimensions() const {
std::vector<int> result;
result.reserve(dim_iteration_specs_.size());
for (const auto& [dim, _] : dim_iteration_specs_) {
result.push_back(dim);
}
return result;
}
bool TensorIterationSpec::IsPhysicallyEquivalent(
const TensorIterationSpec& other) const {
const absl::flat_hash_map<int, DimIterationSpec>
non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);
const absl::flat_hash_map<int, DimIterationSpec>
other_non_trivial_dim_iteration_specs =
FilterTrivialDims(other.dim_iteration_specs_);
if (non_trivial_dim_iteration_specs.size() !=
other_non_trivial_dim_iteration_specs.size()) {
return false;
}
for (const auto& pair : non_trivial_dim_iteration_specs) {
int dimension = pair.first;
const DimIterationSpec& dim_iter_spec = pair.second;
auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);
if (other_it == other_non_trivial_dim_iteration_specs.end()) {
return false;
}
const DimIterationSpec& other_dim_iter_spec = other_it->second;
if (dim_iter_spec.size() != other_dim_iter_spec.size()) {
return false;
}
for (size_t i = 0; i < dim_iter_spec.size(); i++) {
if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {
return false;
}
}
}
return true;
}
std::string TensorIterationSpec::IterationSpecFragment::ToString() const {
return absl::StrCat("{stride=", stride, ", count=", count,
", slice_start=", slice_start,
", sliced_count=", sliced_count, ", subfragments=[",
absl::StrJoin(subfragments, ", "), "]}");
}
std::string TensorIterationSpec::ToString() const {
return absl::StrCat(
"{",
absl::StrJoin(dim_iteration_specs_, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, kv.first, ": ", "[",
absl::StrJoin(kv.second, ", ",
[&](std::string* ss, const auto& v) {
absl::StrAppend(ss, v.ToString());
}),
"]");
}),
"}");
}
namespace triton_fusion {
using Fragment = DimensionOrder::Fragment;
using Fragments = DimensionOrder::Fragments;
using FragmentOrders = DimensionOrder::FragmentOrders;
DimensionOrder DimensionOrder::FromDotOperandOrOutput(
const HloInstruction& hlo, const int split_k_dimension_index) {
DimensionOrder dim_order;
dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());
for (const int i : hlo.shape().layout().minor_to_major()) {
int target_dim_number = i;
if (i == split_k_dimension_index) {
CHECK(!dim_order.tensor_fragments_order_.empty())
<< "The split-K batch dimension has be preceded by the contracting "
"dimension it originates from by construction.";
target_dim_number =
dim_order.tensor_fragments_order_.back().dst_dim_number();
}
dim_order.dim_fragments_orders_[target_dim_number].push_back(
dim_order.tensor_fragments_order_.size());
dim_order.tensor_fragments_order_.push_back(
Fragment{target_dim_number, hlo.shape().dimensions(i)});
}
return dim_order;
}
std::string DimensionOrder::Fragment::ToString() const {
return absl::StrCat(dst_dim_number_, ":", count_, ":", slice_start_, "-",
sliced_count_);
}
std::string DimensionOrder::ToString() const {
std::string ret = absl::StrJoin(tensor_fragments_order_, " - ",
[](std::string* out, const Fragment& f) {
absl::StrAppend(out, f.ToString(), " ");
});
absl::StrAppend(&ret, "|");
for (const auto& [dim, fragments] : dim_fragments_orders_) {
absl::StrAppend(&ret, dim, ":", absl::StrJoin(fragments, ","), " ");
}
return ret;
}
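// Flattens this dimension order into a TensorIterationSpec: fragments are
// walked minor-to-major with strides accumulating as the running product of
// fragment sizes; consecutive fragments mapping to the same destination
// dimension are merged, and trivial (count == 1) fragments and empty
// dimensions are filtered out at the end.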
TensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {
const Fragments& dim_fragments = TensorFragmentsOrder();
TensorIterationSpec tensor_spec;
int64_t accumulated_stride = 1;
int last_dim = -1;
for (int dim_order_index = 0; dim_order_index < dim_fragments.size();
++dim_order_index) {
const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];
VLOG(6) << fragment.ToString();
TensorIterationSpec::DimIterationSpec& dim_spec =
tensor_spec[fragment.dst_dim_number()];
if (last_dim == fragment.dst_dim_number()) {
if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&
dim_spec.back().subfragments.back() == 1) {
dim_spec.back().subfragments.pop_back();
}
if (fragment.full_count() > 1) {
CHECK(!dim_spec.empty());
CHECK(!dim_spec.back().is_sliced())
<< "Only the major-most fragment can have an offset.";
dim_spec.back().slice_start =
fragment.slice_start() * dim_spec.back().count;
dim_spec.back().sliced_count =
fragment.sliced_count() * dim_spec.back().count;
dim_spec.back().count *= fragment.full_count();
dim_spec.back().subfragments.push_back(fragment.sliced_count());
}
} else {
dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{
accumulated_stride,
fragment.full_count(),
fragment.slice_start(),
fragment.sliced_count(),
{fragment.sliced_count()}});
}
accumulated_stride *= fragment.full_count();
last_dim = fragment.dst_dim_number();
}
for (int dim_idx : tensor_spec.GetDimensions()) {
TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];
if (dim_spec.size() <= 1) continue;
TensorIterationSpec::DimIterationSpec filtered_dim_spec;
absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),
[](const TensorIterationSpec::IterationSpecFragment& f) {
return f.count != 1;
});
tensor_spec[dim_idx] = filtered_dim_spec;
}
tensor_spec.RemoveEmptyDimensions();
return tensor_spec;
}
namespace {
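// Returns the logical index of the physical dimension of `shape` that
// contains a fragment labeled `label` in `dim_order`, if any.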
std::optional<int> LogicalIndexOfLabeledDimension(
const Shape& shape, const DimensionOrder& dim_order, const int label) {
auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();
for (int dim : shape.layout().minor_to_major()) {
const int64_t dim_size = shape.dimensions()[dim];
int64_t fragments_size = 1;
while (fragments_size < dim_size) {
fragments_size *= fragment_it->full_count();
if (fragment_it->dst_dim_number() == label) {
return dim;
}
++fragment_it;
}
}
return std::nullopt;
}
using Int64OrError = std::variant<int64_t, FusionDecision>;
Int64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {
if (a == b || b == kNoSplitRequirement) {
return a;
}
if (a == kNoSplitRequirement) {
return b;
}
return FusionDecision::Forbid("Conflicting splits of splittable dimension");
}
}
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error) {
if (std::holds_alternative<FusionDecision>(b_or_error)) {
return b_or_error;
}
const DotRequirements& b = std::get<DotRequirements>(b_or_error);
Int64OrError combined_size_req =
CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,
b.splittable_dimension_major_part_size);
if (std::holds_alternative<FusionDecision>(combined_size_req)) {
return std::get<FusionDecision>(combined_size_req);
}
return DotRequirements(std::get<int64_t>(combined_size_req));
}
namespace {
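// Verifies that `order` can be handled by the dot fusion: only the
// major-most fragment of a logical dimension may be sliced, fragment groups
// within a dimension must appear in order (no transposes inside a
// dimension), and only the designated splittable dimension may be split,
// into at most two parts, whose major part size becomes the returned
// requirement.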
DotRequirementsOrError GetRequirementsIfSupportedOrder(
const DimensionOrder& order, const DotProperties& properties) {
VLOG(8) << order.ToString();
int64_t split_dim_major_part = kNoSplitRequirement;
const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();
for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {
CHECK(!dim_fragments.empty());
for (int i = 0; i < dim_fragments.size() - 1; ++i) {
if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {
return FusionDecision::Forbid("Sliced non-major-most fragment.");
}
}
int group_counter = 0;
int last_seen_group_last_fragment_index = -1;
auto fragment_it = dim_fragments.cbegin();
while (true) {
if (fragment_it == dim_fragments.cend()) {
break;
}
int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();
while ((fragment_it + 1) != dim_fragments.cend() &&
*(fragment_it + 1) == *fragment_it + 1) {
++fragment_it;
grouped_size *= tensor_dim_fragments[*fragment_it].full_count();
}
if (grouped_size == 1) {
++fragment_it;
continue;
}
if (last_seen_group_last_fragment_index > *fragment_it) {
return FusionDecision::Forbid("Transpose within a dimension.");
}
++group_counter;
if (group_counter > 1) {
const int splittable_dimension_index =
properties.splittable_dimension_index;
if (dim_index == splittable_dimension_index) {
if (group_counter == 2) {
if (split_dim_major_part != kNoSplitRequirement &&
split_dim_major_part != grouped_size) {
return FusionDecision::Forbid(
"Conflicting splits of splittable dimension");
}
split_dim_major_part = grouped_size;
} else if (group_counter > 2) {
return FusionDecision::Forbid(
"2nd split of a splittable dimension.");
}
} else {
return FusionDecision::Forbid("Unsupported split of a dimension.");
}
}
last_seen_group_last_fragment_index = *fragment_it;
++fragment_it;
}
}
return DotRequirements(split_dim_major_part);
}
DotRequirementsOrError GetRequirementsIfSupportedOrders(
const HloInstruction& hlo, const DimOrderMap& dim_orders,
const DotProperties& properties) {
const DotRequirements empty_requirements(kNoSplitRequirement);
auto get_requirements =
[&](const HloInstruction& instr) -> DotRequirementsOrError {
if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {
return GetRequirementsIfSupportedOrder(it->second, properties);
}
return empty_requirements;
};
DotRequirements requirements = empty_requirements;
for (const HloInstruction* operand : hlo.operands()) {
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements, get_requirements(*operand));
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return requirements_or_error;
}
requirements = std::get<DotRequirements>(requirements_or_error);
}
return CombineDotRequirements(requirements, get_requirements(hlo));
}
DimOrderMap GetPropagatedDimOrdersForElementwise(
const HloInstruction& hlo, TransformDirection direction,
const DimensionOrder& src_dim_order) {
if (direction == TransformDirection::kOutputToInput) {
DimOrderMap map;
for (const HloInstruction* operand : hlo.operands()) {
map.insert({operand, src_dim_order});
}
return map;
}
return {{&hlo, src_dim_order}};
}
const HloInstruction& GetSourceHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_GE(hlo.operand_count(), 1);
if (direction == TransformDirection::kOutputToInput) {
return hlo;
}
return *hlo.operand(0);
}
using ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;
ConstInstructionVector GetDestHlos(const HloInstruction& hlo,
TransformDirection direction) {
if (direction == TransformDirection::kInputToOutput) {
return {&hlo};
}
ConstInstructionVector hlos;
hlos.reserve(hlo.operands().size());
for (const HloInstruction* operand : hlo.operands()) {
hlos.push_back(operand);
}
return hlos;
}
const HloInstruction& GetDestHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_EQ(hlo.operand_count(), 1);
if (direction == TransformDirection::kInputToOutput) {
return hlo;
}
return *hlo.operand(0);
}
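// Computes the destination dimension order of a bitcast by regrouping the
// source fragments, walking both shapes minor-to-major, so that fragment
// sizes line up with the destination dimension sizes; fails when a boundary
// cannot be matched without splitting unevenly or subdividing a sliced
// fragment, and appends size-1 fragments for trailing unit dimensions.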
DimOrderMapOrError GetPropagatedDimOrdersForBitcast(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
const HloInstruction& dst = GetDestHlo(hlo, direction);
const Shape& dst_shape = dst.shape();
const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();
DimOrderMap dst_dim_orders;
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
int64_t dst_remaining_size = 1;
absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;
auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();
const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();
for (auto src_dim = src_fragments_order.cbegin();
src_dim != src_fragments_order.cend(); ++src_dim) {
auto add_new_fragment = [&](const Fragment& fragment) {
dst_fragments_order.push_back(fragment);
src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);
};
if (dst_remaining_size >= src_dim->full_count()) {
if (dst_remaining_size % src_dim->full_count()) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(*src_dim);
dst_remaining_size /= src_dim->full_count();
} else {
int64_t src_remaining_size = src_dim->full_count();
if (dst_remaining_size > 1) {
if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), dst_remaining_size});
src_remaining_size /= dst_remaining_size;
dst_remaining_size = 1;
}
while (src_remaining_size > 1) {
CHECK(dst_dim_it != dst_dim_end);
int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);
int64_t new_fragment_size = dst_dim_size;
if (dst_dim_size > src_remaining_size) {
if (dst_dim_size % src_remaining_size) {
return FusionDecision::Forbid("Unsupported bitcast");
}
dst_remaining_size = dst_dim_size / src_remaining_size;
new_fragment_size = src_remaining_size;
}
if (src_dim->is_sliced()) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), new_fragment_size});
src_remaining_size /= new_fragment_size;
++dst_dim_it;
}
}
}
CHECK_EQ(dst_remaining_size, 1);
while (dst_dim_it != dst_dim_end) {
if (dst_shape.dimensions(*dst_dim_it) != 1) {
return FusionDecision::Forbid("Unsupported bitcast");
}
if (!dst_fragments_order.empty()) {
dst_fragments_order.push_back(
Fragment{dst_fragments_order.back().dst_dim_number(), 1});
src_to_dst[&src_fragments_order.back()].push_back(
dst_fragments_order.size() - 1);
}
++dst_dim_it;
}
FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
std::vector<int>& dst = dst_dim_fragment_orders[dim_index];
dst.reserve(dim_sequence.size());
for (const int src : dim_sequence) {
std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),
src_to_dst[&src_fragments_order[src]].cend(),
std::back_inserter(dst));
}
}
return dst_dim_orders;
}
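// Shared propagation logic for ops that rearrange logical dimensions
// (transpose, broadcast, reduce, concatenate, copy, pad, slice and
// dynamic-slice): source fragments are grouped per logical source dimension,
// the groups are remapped to each destination's logical dimensions according
// to the op's semantics, and the result is flattened back into a physical
// fragment order per destination.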
DimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
std::list<Fragment> new_fragments;
const HloInstruction& src = GetSourceHlo(hlo, direction);
Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();
if (hlo.opcode() == HloOpcode::kSlice &&
ShapeUtil::IsEffectiveScalar(hlo.shape())) {
return FusionDecision::Forbid("Slice to scalar is not implemented yet.");
}
std::vector<std::vector<Fragment*>> src_physical;
src_physical.reserve(src.shape().rank());
if (src_fragments_order.size() < src.shape().rank()) {
return FusionDecision::Forbid(
"Cannot propagate further from trivial sized tensor");
}
auto src_fragment_it = src_fragments_order.begin();
for (int64_t dim_index : src.shape().layout().minor_to_major()) {
const int64_t dim_size = src.shape().dimensions(dim_index);
int64_t subdim_size_accumulator = 1;
std::vector<Fragment*> subdim_group;
do {
CHECK(src_fragment_it != src_fragments_order.end());
subdim_size_accumulator *= src_fragment_it->full_count();
subdim_group.push_back(&*src_fragment_it);
++src_fragment_it;
} while (subdim_size_accumulator < dim_size);
CHECK_EQ(subdim_size_accumulator, dim_size);
src_physical.push_back(subdim_group);
}
std::vector<std::vector<Fragment*>> src_logical;
src_logical.resize(src_physical.size());
for (int i = 0; i < src_physical.size(); ++i) {
src_logical[src.shape().layout().minor_to_major(i)] = src_physical[i];
}
DimOrderMap dst_dim_orders;
int64_t concat_accumulated_size = 0;
for (const HloInstruction* dst : GetDestHlos(hlo, direction)) {
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({dst, DimensionOrder()}).first->second;
std::vector<std::vector<Fragment*>> dst_logical;
if (hlo.opcode() == HloOpcode::kTranspose) {
const auto* transpose = Cast<HloTransposeInstruction>(&hlo);
std::vector<int64_t> permutation(transpose->dimensions().cbegin(),
transpose->dimensions().cend());
if (direction == TransformDirection::kInputToOutput) {
permutation = InversePermutation(permutation);
}
dst_logical.resize(permutation.size());
for (int i = 0; i < permutation.size(); ++i) {
dst_logical[permutation[i]] = src_logical[i];
}
} else if (hlo.opcode() == HloOpcode::kBroadcast) {
const auto* broadcast = Cast<HloBroadcastInstruction>(&hlo);
dst_logical.resize(broadcast->dimensions().size());
for (int i = 0; i < broadcast->dimensions().size(); ++i) {
dst_logical[i] = src_logical[broadcast->dimensions()[i]];
}
} else if (hlo.opcode() == HloOpcode::kReduce) {
if (dst != &hlo && hlo.operand_index(dst) == 1) {
continue;
}
const auto* reduce = Cast<HloReduceInstruction>(&hlo);
dst_logical.resize(src_logical.size() + reduce->dimensions().size());
if (reduce->dimensions().size() != 1) {
return FusionDecision::Forbid("Unsupported reduction.");
} else if (reduce->dimensions().front() !=
reduce->operand(0)->shape().rank() - 1) {
return FusionDecision::Forbid("Only row reductions are supported.");
}
} else if (hlo.opcode() == HloOpcode::kConcatenate) {
dst_logical.resize(src_logical.size());
for (int i = 0; i < src_logical.size(); ++i) {
if (i == hlo.concatenate_dimension()) {
if (src_logical[i].size() != 1 || src_logical[i][0]->is_sliced()) {
return FusionDecision::Forbid("Unsupported concatenation.");
}
const Fragment& src_fragment = *src_logical[i][0];
Fragment& dst_fragment = new_fragments.emplace_back(
src_fragment.dst_dim_number(), dst->shape().dimensions(i));
dst_fragment.set_slice(-concat_accumulated_size,
dst->shape().dimensions(i));
concat_accumulated_size += dst->shape().dimensions(i);
dst_logical[i].push_back(&dst_fragment);
} else {
dst_logical[i] = src_logical[i];
}
}
} else if (hlo.opcode() == HloOpcode::kCopy) {
CHECK(ShapeUtil::SameDimensions(src.shape(), dst->shape()));
dst_logical = src_logical;
} else if (hlo.opcode() == HloOpcode::kPad) {
if (dst != &hlo && hlo.operand_index(dst) == 1) {
continue;
}
const auto* pad = Cast<HloPadInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int i = 0; i < src_logical.size(); ++i) {
const int padding =
pad->padding_config().dimensions(i).edge_padding_high();
CHECK_EQ(pad->padding_config().dimensions(i).edge_padding_low(), 0);
CHECK_EQ(pad->padding_config().dimensions(i).interior_padding(), 0);
if (padding == 0) {
dst_logical[i] = src_logical[i];
} else {
const std::vector<Fragment*>& fragments = src_logical[i];
CHECK_GE(fragments.size(), 2);
CHECK(absl::c_all_of(fragments, [&](const Fragment* fragment) {
return fragment->dst_dim_number() ==
fragments.front()->dst_dim_number();
}));
std::vector<Fragment*> non_trivial_fragments;
absl::c_copy_if(fragments, std::back_inserter(non_trivial_fragments),
[](const Fragment* fragment) {
return fragment->full_count() > 1;
});
CHECK_EQ(non_trivial_fragments.size(), 2);
new_fragments.emplace_back(
non_trivial_fragments[0]->dst_dim_number(),
non_trivial_fragments[0]->full_count() *
non_trivial_fragments[1]->full_count() -
padding);
dst_logical[i] = {&new_fragments.back()};
}
}
} else if (hlo.opcode() == HloOpcode::kSlice) {
const auto slice = Cast<HloSliceInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int dim = 0; dim < src_logical.size(); ++dim) {
dst_logical[dim] = src_logical[dim];
if (slice->slice_limits(dim) - slice->slice_starts(dim) !=
dst->shape().dimensions(dim)) {
if (dst_logical[dim].size() > 1) {
return FusionDecision::Forbid("Slicing of fragmented dimension.");
}
auto fragment = dst_logical[dim].front();
fragment->set_count(dst->shape().dimensions(dim));
fragment->set_slice(
fragment->slice_start() + slice->slice_starts(dim),
fragment->sliced_count());
}
}
} else if (hlo.opcode() == HloOpcode::kDynamicSlice) {
if (dst != &hlo && hlo.operand_index(dst) >= 1) {
continue;
}
const auto dynamic_slice = Cast<HloDynamicSliceInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int dim = 0; dim < src_logical.size(); ++dim) {
dst_logical[dim] = src_logical[dim];
if (dynamic_slice->slice_sizes(dim) != dst->shape().dimensions(dim)) {
if (dst_logical[dim].size() > 1) {
return FusionDecision::Forbid("Slicing of fragmented dimension.");
}
auto fragment = dst_logical[dim].front();
fragment->set_count(dst->shape().dimensions(dim));
fragment->set_slice(fragment->slice_start(),
dst->shape().dimensions(dim));
}
}
} else {
return FusionDecision::Forbid("Function called on a wrong instruction.");
}
absl::flat_hash_map<const Fragment*, int> src_to_dst;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
FragmentOrders& dst_dim_fragments_order =
dst_dim_order.DimFragmentsOrders();
absl::flat_hash_set<int> dim_numbers_present_in_dst;
for (const int64_t dim_idx : dst->shape().layout().minor_to_major()) {
for (const Fragment* subdim : dst_logical[dim_idx]) {
dst_fragments_order.push_back(*subdim);
src_to_dst[subdim] = dst_fragments_order.size() - 1;
dim_numbers_present_in_dst.insert(subdim->dst_dim_number());
}
}
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
for (const int fragment_number : dim_sequence) {
const auto it = src_to_dst.find(&src_fragments_order[fragment_number]);
if (it == src_to_dst.cend()) {
if (hlo.opcode() == HloOpcode::kBroadcast &&
src_fragments_order[fragment_number].full_count() > 1 &&
dim_numbers_present_in_dst.contains(dim_index)) {
return FusionDecision::Forbid("Unsupported broadcast");
}
continue;
}
dst_dim_fragments_order[dim_index].push_back(it->second);
}
}
}
return dst_dim_orders;
}
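// Opcode dispatch for dimension-order propagation. Also encodes direction
// restrictions (broadcast, pad, slice, dynamic-slice and concatenate are only
// handled output-to-input), requires reshapes to be bitcasts, and rejects
// concatenations whose operands do not tile evenly into
// kMinConcatFragmentSize chunks along the non-contracting dimension.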
DimOrderMapOrError GetPropagatedDimOrders(const HloInstruction& hlo,
const TransformDirection direction,
const DimensionOrder& src_dim_order,
const DotProperties& properties) {
VLOG(7) << "Analyzing " << hlo.ToString();
if (hlo.opcode() != HloOpcode::kParameter &&
direction == TransformDirection::kOutputToInput &&
absl::c_any_of(hlo.users(), [](const HloInstruction* user) {
return (user->opcode() == HloOpcode::kConcatenate ||
user->opcode() == HloOpcode::kDynamicSlice);
})) {
return FusionDecision::Forbid(
"No fusion into concatenations or dynamic slice.");
}
if (hlo.opcode() == HloOpcode::kParameter ||
hlo_query::IsScalarConstant(&hlo)) {
CHECK(direction == TransformDirection::kOutputToInput);
return DimOrderMap{};
} else if (hlo.opcode() == HloOpcode::kTranspose ||
hlo.opcode() == HloOpcode::kCopy) {
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kBroadcast) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported broadcast direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kPad) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported pad direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.operand_count() > 0 &&
legacy_triton::IsTritonSupportedElementwiseUpToFloatNormalization(
hlo.opcode(), hlo.operand(0)->shape().element_type())) {
return GetPropagatedDimOrdersForElementwise(hlo, direction, src_dim_order);
} else if (hlo.opcode() == HloOpcode::kBitcast) {
return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kSlice) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported slice direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kDynamicSlice &&
direction == TransformDirection::kOutputToInput) {
if (CodegenDecision decision = legacy_triton::IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&hlo));
!decision.CanFuse()) {
return decision;
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kReshape) {
if (!ShapeUtil::ReshapeIsBitcast(hlo.operand(0)->shape(), hlo.shape())) {
return FusionDecision::Forbid("Non-bitcast reshape.");
}
return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kConcatenate &&
direction == TransformDirection::kOutputToInput) {
int64_t noncontracting_dim_label = properties.noncontracting_dimension;
const FragmentOrders& src_dim_fragments_orders =
src_dim_order.DimFragmentsOrders();
auto noncontracting_dim_fragment_order_it =
src_dim_fragments_orders.find(noncontracting_dim_label);
if (noncontracting_dim_fragment_order_it !=
src_dim_fragments_orders.end()) {
if (noncontracting_dim_fragment_order_it->second.size() > 1) {
return FusionDecision::Forbid(
"Concatenations on split non-contracting dimensions are "
"unsupported.");
}
}
auto dim = LogicalIndexOfLabeledDimension(hlo.shape(), src_dim_order,
noncontracting_dim_label);
if (!dim.has_value() || dim.value() != hlo.concatenate_dimension()) {
return FusionDecision::Forbid("Unsupported concatenation.");
}
if (absl::c_any_of(hlo.operands(), [&hlo](const HloInstruction* operand) {
constexpr int kMinConcatFragmentSize = 64;
return operand->shape().dimensions(hlo.concatenate_dimension()) %
kMinConcatFragmentSize !=
0;
})) {
return FusionDecision::Forbid(
"At least one operand of concatenation can not be perfectly tiled.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
}
return FusionDecision::Forbid("Unimplemented instruction.");
}
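// Returns the total operand size minus the output size of `hlo` in bytes,
// used by the profitability heuristics below.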
int64_t InputMinusOutputBytes(const HloInstruction& hlo) {
CHECK(!hlo.shape().IsTuple());
int64_t input_size = 0;
for (const HloInstruction* operand : hlo.operands()) {
CHECK(!operand->shape().IsTuple());
input_size += ShapeUtil::ByteSizeOf(operand->shape());
}
return input_size - ShapeUtil::ByteSizeOf(hlo.shape());
}
bool CanNotBeFusedIntoAUser(const HloInstruction& hlo) {
return hlo.IsRoot() || (hlo.user_count() == 1 && hlo.users()[0]->IsRoot() &&
hlo.users()[0]->opcode() == HloOpcode::kTuple);
}
constexpr int kIoToleranceBytes = 1024;
bool IsInputWorthFusing(const HloInstruction& hlo) {
if (InputMinusOutputBytes(hlo) <= kIoToleranceBytes) {
return true;
}
if (hlo.user_count() > 1) {
return false;
}
if (hlo.opcode() == HloOpcode::kSlice &&
hlo_query::AllOperandsAreParametersOrConstants(hlo)) {
return true;
}
return hlo_query::AllOperandsAreParametersOrConstantsWithSingleUser(hlo);
}
bool IsOutputWorthFusing(const HloInstruction& hlo) {
return CanNotBeFusedIntoAUser(hlo) ||
InputMinusOutputBytes(hlo) >= -kIoToleranceBytes;
}
FusionDecision IsConversionWorthFusing(const HloInstruction& input,
se::GpuComputeCapability gpu_version) {
if (ShapeUtil::ByteSizeOf(input.operand(0)->shape()) >
ShapeUtil::ByteSizeOf(input.shape())) {
return FusionDecision::Forbid("Narrowing conversion.");
}
return FusionDecision::Allow();
}
}
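// Propagates `src_dim_order` through `hlo` and, on success, verifies that the
// propagated orders are supported, returning them together with the combined
// split requirements.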
DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(
const HloInstruction& hlo, const DimensionOrder& src_dim_order,
TransformDirection direction, const DotProperties& properties) {
DimOrderMapOrError propagated_dim_orders_or_error =
GetPropagatedDimOrders(hlo, direction, src_dim_order, properties);
if (std::holds_alternative<FusionDecision>(propagated_dim_orders_or_error)) {
return std::get<FusionDecision>(propagated_dim_orders_or_error);
}
DimOrderMap propagated_dim_orders =
std::move(std::get<DimOrderMap>(propagated_dim_orders_or_error));
DotRequirementsOrError requirements_or_error =
GetRequirementsIfSupportedOrders(hlo, propagated_dim_orders, properties);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return std::get<FusionDecision>(requirements_or_error);
}
return DimOrdersAndReqs{propagated_dim_orders,
std::get<DotRequirements>(requirements_or_error)};
}
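// Like GetPropagatedDimOrdersAndRequirements, but additionally applies
// profitability heuristics controlled by xla_gpu_triton_fusion_level: at
// level < 2, narrowing conversions and elementwise ops other than convert and
// copy are rejected as inputs, and output fusion is disabled entirely; at
// higher levels, inputs must pass IsInputWorthFusing (or be a binary
// elementwise op with a fusible broadcast of a parameter/constant), and fused
// outputs may only have parameter or scalar-broadcast side operands and must
// pass IsOutputWorthFusing.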
DimOrdersAndReqsOrError
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
const HloInstruction& hlo, TransformDirection transform_direction,
const std::optional<int>& src_operand_index,
const DimensionOrder& src_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties) {
CHECK_EQ(transform_direction == TransformDirection::kInputToOutput,
src_operand_index.has_value());
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement) {
return FusionDecision::Forbid("Unsupported instruction.");
}
if (hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kAllReduceStart ||
hlo.opcode() == HloOpcode::kAllReduceDone) {
return FusionDecision::Forbid("Reductions are not fused yet.");
}
if (hlo.opcode() == HloOpcode::kPad) {
return FusionDecision::Forbid("Pads are not fused yet.");
}
if (auto decision =
legacy_triton::IsTritonSupportedInstruction(hlo, gpu_version);
!decision.CanFuse()) {
return decision;
}
DimOrdersAndReqsOrError result_or_error =
GetPropagatedDimOrdersAndRequirements(hlo, src_dim_order,
transform_direction, properties);
if (std::holds_alternative<FusionDecision>(result_or_error)) {
VLOG(5) << "Not fusing " << hlo.ToString()
<< " to the output due to the decision: "
<< std::get<FusionDecision>(result_or_error).Explain();
return result_or_error;
}
DimOrdersAndReqs dim_orders_and_requirements =
std::move(std::get<DimOrdersAndReqs>(result_or_error));
int fusion_level =
hlo.GetModule()->config().debug_options().xla_gpu_triton_fusion_level();
if (transform_direction == TransformDirection::kOutputToInput) {
if (fusion_level < 2) {
if (hlo.opcode() == HloOpcode::kConvert) {
if (FusionDecision decision = IsConversionWorthFusing(hlo, gpu_version);
!decision) {
return decision;
}
} else if (hlo.IsElementwise() && hlo.opcode() != HloOpcode::kCopy) {
return FusionDecision::Forbid("Ignored elementwise operation");
}
} else {
bool accepted = false;
if (hlo.IsElementwise() && hlo.operand_count() == 2) {
for (const HloInstruction* operand : hlo.operands()) {
if (operand->opcode() == HloOpcode::kBroadcast &&
(operand->operand(0)->opcode() == HloOpcode::kParameter ||
operand->operand(0)->opcode() == HloOpcode::kConstant) &&
std::holds_alternative<DimOrdersAndReqs>(
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
*operand, TransformDirection::kOutputToInput,
std::nullopt,
dim_orders_and_requirements.dim_orders.at(operand),
gpu_version, properties))) {
accepted = true;
break;
}
}
}
if (!accepted && !IsInputWorthFusing(hlo)) {
return FusionDecision::Forbid(
"Not obviously profitable to fuse as input.");
}
}
} else {
if (fusion_level < 2) {
return FusionDecision::Forbid(
"Skipping fusing outputs at low fusion levels.");
}
for (int i = 0; i < hlo.operand_count(); ++i) {
const HloInstruction* operand = hlo.operand(i);
if (i == *src_operand_index) {
continue;
}
if ((operand->opcode() == HloOpcode::kBroadcast &&
ShapeUtil::IsScalar(operand->operand(0)->shape())) ||
operand->opcode() == HloOpcode::kParameter) {
continue;
}
return FusionDecision::Forbid(
"Has multiple inputs - not properly analyzed yet.");
}
if (!IsOutputWorthFusing(hlo)) {
return FusionDecision::Forbid(
"Not obviously profitable to fuse as output.");
}
}
return dim_orders_and_requirements;
}
}
}
} | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <vector>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla::gpu {
namespace {
using TritonTilingPropagationTest = HloTestBase;
using triton_fusion::DimensionOrder;
DimensionOrder FromFragments(DimensionOrder::Fragments fragments) {
DimensionOrder dim_order;
DimensionOrder::Fragments& tensor_fragments_order =
dim_order.TensorFragmentsOrder();
DimensionOrder::FragmentOrders& dim_fragments_orders =
dim_order.DimFragmentsOrders();
for (const DimensionOrder::Fragment& fragment : fragments) {
tensor_fragments_order.push_back(fragment);
dim_fragments_orders[fragment.dst_dim_number()].push_back(
tensor_fragments_order.size());
}
return dim_order;
}
TEST_F(
TritonTilingPropagationTest,
DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
DimensionOrder::Fragment fragment_1(0, 97);
DimensionOrder::Fragment fragment_2(0, 1);
DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});
DimensionOrder::Fragment fragment_3(0, 97);
DimensionOrder::Fragment fragment_4(1, 1);
DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});
EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));
}
TEST_F(
TritonTilingPropagationTest,
IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
  TensorIterationSpec::IterationSpecFragment fragment_1 = {
      /*stride=*/1, /*count=*/97, /*slice_start=*/0, /*sliced_count=*/97,
      /*subfragments=*/{97}};
TensorIterationSpec spec_1;
spec_1[0].push_back(fragment_1);
  TensorIterationSpec::IterationSpecFragment fragment_2 = {
      /*stride=*/1, /*count=*/97, /*slice_start=*/0, /*sliced_count=*/97,
      /*subfragments=*/{97}};
  TensorIterationSpec::IterationSpecFragment fragment_3 = {
      /*stride=*/97, /*count=*/1, /*slice_start=*/0, /*sliced_count=*/1,
      /*subfragments=*/{1}};
TensorIterationSpec spec_2;
spec_2[0].push_back(fragment_2);
spec_2[1].push_back(fragment_3);
EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));
}
TEST_F(TritonTilingPropagationTest,
DimensionsShouldNotBeRemovedByToTensorIterationSpec) {
DimensionOrder::Fragment fragment_0(0, 97);
DimensionOrder::Fragment fragment_1(1, 1);
DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});
TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();
const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);
EXPECT_NE(dim_spec_0, nullptr);
EXPECT_EQ(dim_spec_0->size(), 1);
EXPECT_EQ(dim_spec_0->at(0).count, 97);
const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);
EXPECT_NE(dim_spec_1, nullptr);
EXPECT_EQ(dim_spec_1->size(), 1);
EXPECT_EQ(dim_spec_1->at(0).count, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22010223-07c7-4dc2-819a-01d14addc9b5 | cpp | tensorflow/tensorflow | ir_emission_utils | third_party/xla/xla/service/cpu/ir_emission_utils.cc | third_party/xla/xla/service/cpu/ir_emission_utils_test.cc | #include "xla/service/cpu/ir_emission_utils.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/shape_util.h"
#include "xla/window_util.h"
namespace xla {
namespace cpu {
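// Returns the minimum alignment the target machine guarantees for an
// allocation large enough to hold the dense array `shape`.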
int64_t GetMinimumAlignmentForArray(
const Shape& shape, const TargetMachineFeatures& target_machine_features) {
CHECK(LayoutUtil::IsDenseArray(shape));
int64_t allocation_size_bytes =
ShapeUtil::ElementsIn(shape) *
ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type());
return target_machine_features.minimum_alignment_for_allocation(
allocation_size_bytes);
}
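// A convolution is only potentially implementable with the Eigen kernels if
// every buffer is aligned to Eigen's expected tensor alignment, the input and
// kernel are non-empty F16/F32 arrays, the window has no reversal, there are
// one to three spatial dimensions, and the dimension numbers describe
// batch-major, feature-minor activations with a spatial-major,
// output-feature-minor kernel (NHWC / HWIO in the 2D case).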
bool PotentiallyImplementedAsEigenConvolution(
const HloInstruction& convolution,
const TargetMachineFeatures& target_machine_features) {
const Shape& input_shape = convolution.operand(0)->shape();
const Shape& kernel_shape = convolution.operand(1)->shape();
const Shape& output_shape = convolution.shape();
auto is_aligned = [&](const Shape& shape) {
return GetMinimumAlignmentForArray(shape, target_machine_features) >=
TargetMachineFeatures::kEigenExpectedTensorAlignment;
};
if (!is_aligned(input_shape) || !is_aligned(kernel_shape) ||
!is_aligned(output_shape)) {
return false;
}
if (ShapeUtil::IsZeroElementArray(input_shape) ||
ShapeUtil::IsZeroElementArray(kernel_shape)) {
return false;
}
CHECK(
ShapeUtil::SameElementTypeIgnoringFpPrecision(input_shape, kernel_shape));
PrimitiveType primitive_type = input_shape.element_type();
if (primitive_type != F16 && primitive_type != F32) {
return false;
}
if (window_util::HasWindowReversal(convolution.window())) {
return false;
}
const ConvolutionDimensionNumbers& dnums =
convolution.convolution_dimension_numbers();
const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();
if (num_spatial_dims < 1 || num_spatial_dims > 3) {
return false;
}
for (int64_t i = 0; i < num_spatial_dims; ++i) {
if (dnums.input_spatial_dimensions(i) != i + 1) {
return false;
}
if (dnums.kernel_spatial_dimensions(i) != i) {
return false;
}
if (dnums.output_spatial_dimensions(i) != i + 1) {
return false;
}
}
return dnums.input_batch_dimension() == 0 &&
dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 &&
dnums.output_batch_dimension() == 0 &&
dnums.output_feature_dimension() ==
output_shape.dimensions_size() - 1 &&
dnums.kernel_input_feature_dimension() ==
kernel_shape.dimensions_size() - 2 &&
dnums.kernel_output_feature_dimension() ==
kernel_shape.dimensions_size() - 1;
}
}
} | #include "xla/service/cpu/ir_emission_utils.h"
#include <memory>
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using IrEmitterTest = HloTestBase;
TEST_F(IrEmitterTest, ConvWithZeroSizedKernelNotImplementedAsEigen) {
const char* const hlo_string = R"(
HloModule ModuleWithConv
ENTRY Conv {
input = f32[32,50,28,28]{3,2,1,0} parameter(0)
kernel = f32[50,0,5,5]{3,2,1,0} parameter(1)
ROOT convolution = f32[32,0,24,24]{3,2,1,0} convolution(input, kernel),
window={size=5x5},
dim_labels=bf01_io01->bf01
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* entry_computation = module->entry_computation();
HloInstruction* conv_instr = entry_computation->root_instruction();
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
EXPECT_FALSE(cpu::PotentiallyImplementedAsEigenConvolution(
*conv_instr, target_machine_features));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90d52628-50f5-4b95-b9ab-5399df301b62 | cpp | tensorflow/tensorflow | fusion_deduplication_cache | third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc | third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc | #include "xla/service/gpu/fusion_deduplication_cache.h"
#include <cstddef>
#include <cstdint>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
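// Hash/equality functors for deduplication: instructions compare as identical
// up to operand identity (operands only need matching shapes, and called
// computations must compare equal), so structurally equal instructions map to
// the same id.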
class HloInstructionPtrHash {
public:
size_t operator()(const HloInstruction* instr) const {
return absl::HashOf(*instr);
}
};
class HloInstructionPtrEq {
public:
size_t operator()(const HloInstruction* instr1,
const HloInstruction* instr2) const {
auto operands_eq = [](const HloInstruction* a, const HloInstruction* b) {
if (a == b) return true;
return ShapeUtil::Equal(a->shape(), b->shape());
};
auto eq_computations = [](const HloComputation* a,
const HloComputation* b) { return *a == *b; };
return instr1->Identical(*instr2, operands_eq, eq_computations);
}
};
}
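// Assigns every instruction in `module` an id such that structurally
// identical instructions (as defined by the functors above) share the same
// id; the first member of each equivalence class allocates a fresh one.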
FusionDeduplicationCache FusionDeduplicationCache::Create(
const HloModule& module) {
absl::flat_hash_map<const HloInstruction*, InstructionId,
HloInstructionPtrHash, HloInstructionPtrEq>
deduplicated_id_map;
absl::flat_hash_map<const HloInstruction*, InstructionId> instruction_id_map;
int64_t instruction_count = module.instruction_count();
deduplicated_id_map.reserve(instruction_count);
instruction_id_map.reserve(instruction_count);
int64_t next_id = 0;
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
auto it = deduplicated_id_map.emplace(instruction, next_id);
if (it.second) {
++next_id;
}
instruction_id_map[instruction] = it.first->second;
}
}
return FusionDeduplicationCache(next_id, std::move(instruction_id_map));
}
FusionDeduplicationCache::InstructionId
FusionDeduplicationCache::GetInstructionId(const HloInstruction& instruction) {
return instruction_id_map_.at(&instruction);
}
FusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId(
const HloInstruction& producer, const HloInstruction& consumer,
int64_t consumer_operand_index) {
FusionDeduplicationCache::FusionId fusion_id{GetInstructionId(producer),
GetInstructionId(consumer),
consumer_operand_index};
if (fusion_id_map_.emplace(fusion_id, next_id_).second) {
++next_id_;
}
return fusion_id;
}
FusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId(
const HloInstruction& producer, const HloInstruction& consumer) {
return GetFusionId(producer, consumer, consumer.operand_index(&producer));
}
void FusionDeduplicationCache::UpdateFusedInstructionId(
const HloInstruction& fusion_instruction,
const HloInstruction& original_producer,
const HloInstruction& original_consumer, int64_t consumer_operand_index) {
instruction_id_map_[&fusion_instruction] = fusion_id_map_.at(GetFusionId(
original_producer, original_consumer, consumer_operand_index));
}
}
} | #include "xla/service/gpu/fusion_deduplication_cache.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer) {
HloComputation* computation = consumer->parent();
HloInstruction* fusion_instruction = consumer;
if (consumer->opcode() != HloOpcode::kFusion) {
fusion_instruction =
computation->AddInstruction(HloInstruction::CreateFusion(
consumer->shape(), HloInstruction::FusionKind::kLoop, consumer));
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
}
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (producer->user_count() == 0) {
TF_CHECK_OK(computation->RemoveInstruction(producer));
}
return fusion_instruction;
}
using FusionDeduplicationCacheTest = HloTestBase;
TEST_F(FusionDeduplicationCacheTest, IdenticalInstructions_EqualId) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
add1 = f32[8] add(p0, p1)
ROOT add2 = f32[8] add(add1, p1)
})"));
FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
const HloInstruction* add2 = module->entry_computation()->root_instruction();
const HloInstruction* add1 = add2->operand(0);
EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
}
TEST_F(FusionDeduplicationCacheTest,
IdenticalInstructionsInDifferentComputations_EqualId) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
computation.1 {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ROOT add1 = f32[8] add(p0, p1)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ROOT add2 = f32[8] add(p0, p0)
})"));
FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
const HloInstruction* add1 =
module->GetComputationWithName("computation.1")->root_instruction();
const HloInstruction* add2 = module->entry_computation()->root_instruction();
EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
}
TEST_F(FusionDeduplicationCacheTest, IdenticalFusionInstructions_EqualId) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
log1 = f32[8] log(p0)
add1 = f32[8] add(log1, p1)
log2 = f32[8] log(add1)
ROOT add2 = f32[8] add(log2, p0)
})"));
HloComputation* entry_computation = module->entry_computation();
auto* add1 = entry_computation->GetInstructionWithName("add1");
auto* add2 = entry_computation->GetInstructionWithName("add2");
auto* log1 = entry_computation->GetInstructionWithName("log1");
auto* log2 = entry_computation->GetInstructionWithName("log2");
FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
EXPECT_EQ(cache.GetInstructionId(*log1), cache.GetInstructionId(*log2));
EXPECT_NE(cache.GetInstructionId(*add1), cache.GetInstructionId(*log1));
EXPECT_EQ(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));
HloInstruction* fusion1 = Fuse(log1, add1);
  cache.UpdateFusedInstructionId(*fusion1, *log1, *add1,
                                 /*consumer_operand_index=*/0);
HloInstruction* fusion2 = Fuse(log2, add2);
  cache.UpdateFusedInstructionId(*fusion2, *log2, *add2,
                                 /*consumer_operand_index=*/0);
EXPECT_EQ(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));
}
TEST_F(FusionDeduplicationCacheTest,
DifferentConsumerOperandIndex_DifferentId) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
log1 = f32[8] log(p0)
add1 = f32[8] add(log1, p1)
log2 = f32[8] log(add1)
ROOT add2 = f32[8] add(p0, log2)
})"));
HloComputation* entry_computation = module->entry_computation();
auto* add1 = entry_computation->GetInstructionWithName("add1");
auto* add2 = entry_computation->GetInstructionWithName("add2");
auto* log1 = entry_computation->GetInstructionWithName("log1");
auto* log2 = entry_computation->GetInstructionWithName("log2");
FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
EXPECT_NE(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));
HloInstruction* fusion1 = Fuse(log1, add1);
  cache.UpdateFusedInstructionId(*fusion1, *log1, *add1,
                                 /*consumer_operand_index=*/0);
HloInstruction* fusion2 = Fuse(log2, add2);
  cache.UpdateFusedInstructionId(*fusion2, *log2, *add2,
                                 /*consumer_operand_index=*/1);
EXPECT_NE(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bea3fc56-baf4-4f52-a00f-73076a7b90ad | cpp | tensorflow/tensorflow | fusion_process_dump | third_party/xla/xla/service/gpu/fusion_process_dump.cc | third_party/xla/xla/service/gpu/fusion_process_dump_test.cc | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
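// Helpers to replay one recorded fusion step: AddFusionInstruction wraps
// `consumer` in a new loop fusion unless it already is a fusion, and Fuse
// then merges or fuses `producer` into it, removing the producer once it has
// no users left.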
HloInstruction* AddFusionInstruction(HloInstruction* producer,
HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
if (consumer->opcode() == HloOpcode::kFusion) {
return consumer;
}
auto kind = HloInstruction::FusionKind::kLoop;
auto fusion_instruction = computation->AddInstruction(
HloInstruction::CreateFusion(consumer->shape(), kind, consumer),
fusion_name);
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
return fusion_instruction;
}
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
HloInstruction* fusion_instruction =
AddFusionInstruction(producer, consumer, computation, fusion_name);
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (producer->user_count() == 0) {
TF_CHECK_OK(computation->RemoveInstruction(producer));
}
return fusion_instruction;
}
absl::string_view GetProducerName(const FusionStep& step) {
if (step.has_fusion()) {
return step.fusion().producer_name();
}
if (step.has_update_priority()) {
return step.update_priority().producer_name();
}
if (step.has_producer_ineligible()) {
return step.producer_ineligible().producer_name();
}
LOG(FATAL) << "Producer name not found in the current step.";
}
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromFile(
const std::string& path) {
std::string format = std::string(tsl::io::Extension(path));
std::string data;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return FusionProcessDump::LoadFromData(data, format);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromData(
const std::string& data, absl::string_view format) {
FusionProcessDumpProto fusion_process_dump_proto;
if (format == "txt" || format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(
data, &fusion_process_dump_proto)) {
return InvalidArgument("Failed to parse input as HLO protobuf text");
}
} else if (format == "pb") {
if (!fusion_process_dump_proto.ParseFromString(data)) {
return InvalidArgument("Failed to parse input as HLO protobuf binary");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: txt, pb, or pbtxt",
format);
}
return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto) {
TF_ASSIGN_OR_RETURN(
auto module,
LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),
"txt"));
se::DeviceDescription gpu_device_info(
fusion_process_dump_proto.gpu_device_info());
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : computation->instructions()) {
instruction_name_to_computation_map[instr->name()] = computation;
}
}
return FusionProcessDump(std::move(fusion_process_dump_proto),
std::move(module), std::move(gpu_device_info),
std::move(instruction_name_to_computation_map));
}
HloComputation* FusionProcessDump::GetCurrentComputation() {
return instruction_name_to_computation_map_.at(
GetProducerName(CurrentStep()));
}
HloInstruction* FusionProcessDump::GetInstructionWithName(
absl::string_view name) {
return instruction_name_to_computation_map_[name]->GetInstructionWithName(
name);
}
HloInstruction* FusionProcessDump::GetProducer() {
return GetInstructionWithName(GetProducerName(CurrentStep()));
}
absl::InlinedVector<HloInstruction*, 2> FusionProcessDump::GetConsumers() {
auto& step = CurrentStep();
if (step.has_fusion()) {
return {GetInstructionWithName(step.fusion().consumer_name())};
}
if (step.has_update_priority()) {
absl::InlinedVector<HloInstruction*, 2> consumers;
for (const auto& consumer_name : step.update_priority().consumer_names()) {
consumers.push_back(GetInstructionWithName(consumer_name));
}
return consumers;
}
return {};
}
const FusionStep& FusionProcessDump::CurrentStep() {
CHECK(HasNext());
return fusion_process_dump_proto_.fusion_steps(current_step_idx_);
}
bool FusionProcessDump::HasNext() {
return current_step_idx_ < fusion_process_dump_proto_.fusion_steps_size();
}
void FusionProcessDump::Advance() {
auto step = CurrentStep();
if (step.has_fusion()) {
const auto& fusion_step = step.fusion();
auto* computation = GetCurrentComputation();
HloInstruction* producer =
computation->GetInstructionWithName(fusion_step.producer_name());
HloInstruction* consumer =
computation->GetInstructionWithName(fusion_step.consumer_name());
HloInstruction* fusion =
Fuse(producer, consumer, computation, fusion_step.fusion_name());
instruction_name_to_computation_map_[fusion->name()] = computation;
last_fusion_ = fusion;
}
++current_step_idx_;
}
}
} | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using FusionProcessDumpTest = HloTestBase;
void AddFusion(FusionProcessDumpProto& dump_proto,
const std::string& fusion_name, const std::string& producer_name,
const std::string& consumer_name) {
auto step = dump_proto.add_fusion_steps();
auto fusion_step = step->mutable_fusion();
fusion_step->set_fusion_name(fusion_name);
fusion_step->set_producer_name(producer_name);
fusion_step->set_consumer_name(consumer_name);
}
TEST_F(FusionProcessDumpTest, MultipleFusionSteps) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add = f32[] add(p0, p1)
subtract = f32[] subtract(p0, p1)
abs = f32[] abs(subtract)
ROOT multiply = f32[] multiply(add, abs)
})"));
FusionProcessDumpProto dump_proto;
*dump_proto.mutable_gpu_device_info() =
TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();
dump_proto.set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
AddFusion(dump_proto, "fusion.1", "subtract", "abs");
AddFusion(dump_proto, "fusion.2", "fusion.1", "multiply");
AddFusion(dump_proto, "fusion.2", "add", "fusion.2");
TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,
FusionProcessDump::LoadFromProto(dump_proto));
fusion_process_dump.Advance();
fusion_process_dump.Advance();
fusion_process_dump.Advance();
EXPECT_FALSE(fusion_process_dump.HasNext());
auto root =
fusion_process_dump.module()->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "fusion.2");
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Multiply(
m::Add(m::Parameter(), m::Parameter()),
m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1559345f-112d-4a23-82b6-d268bfab2164 | cpp | tensorflow/tensorflow | kernel_reuse_cache | third_party/xla/xla/service/gpu/kernel_reuse_cache.cc | third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
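// Fingerprints the kernel arguments: an argument that shares a buffer slice
// with an earlier one is encoded as a reference to it ("=<index>"); otherwise
// its alignment is emitted, followed by "a" if aliased and "w" if written.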
std::string GetArgumentFingerprint(
absl::Span<const KernelArgument> kernel_arguments) {
return absl::StrJoin(
kernel_arguments, ",", [](std::string* s, const KernelArgument& arg) {
if (arg.first_with_same_slice().has_value()) {
absl::StrAppend(s, "=", arg.first_with_same_slice().value());
return;
}
absl::StrAppend(s, arg.alignment());
if (arg.aliased()) {
absl::StrAppend(s, "a");
}
if (arg.written()) {
absl::StrAppend(s, "w");
}
});
}
}
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator) {
auto print_options = HloPrintOptions::Fingerprint()
.set_print_only_essential_constants(false)
.set_print_operand_shape(false);
return absl::StrCat(discriminator, "(",
GetArgumentFingerprint(kernel_arguments), ")",
fused_computation->ToString(print_options));
}
absl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {
for (const auto& [name, entry] : proto.entries()) {
std::optional<se::ClusterDim> cluster_dim;
if (entry.has_cluster_dim()) {
cluster_dim =
se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),
entry.cluster_dim().z()};
}
TF_RET_CHECK(
cache_
.insert(
{entry.fingerprint(),
Entry{name,
LaunchDimensions{
entry.launch_dimensions().num_blocks(),
entry.launch_dimensions().num_threads_per_block()},
cluster_dim, entry.shmem_bytes(), entry.binary()}})
.second);
}
return absl::OkStatus();
}
CompilationCacheProto KernelReuseCache::Export() const {
CompilationCacheProto proto;
for (const auto& [fingerprint, cache_entry] : cache_) {
if (!hits_.contains(fingerprint)) {
VLOG(5) << "Not exporting unused " << cache_entry.kernel_name;
continue;
}
auto [it, inserted] = proto.mutable_entries()->emplace(
cache_entry.kernel_name, CompilationCacheEntryProto{});
CHECK(inserted) << cache_entry.kernel_name;
CompilationCacheEntryProto& proto_entry = it->second;
proto_entry.set_fingerprint(fingerprint);
LaunchDimensionsProto launch_dimensions_proto;
launch_dimensions_proto.set_num_blocks(
cache_entry.launch_dimensions.num_blocks());
launch_dimensions_proto.set_num_threads_per_block(
cache_entry.launch_dimensions.num_threads_per_block());
*proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;
if (cache_entry.cluster_dim.has_value()) {
ClusterDimProto cluster_dim_proto;
cluster_dim_proto.set_x(cache_entry.cluster_dim->x);
cluster_dim_proto.set_y(cache_entry.cluster_dim->y);
cluster_dim_proto.set_z(cache_entry.cluster_dim->z);
*proto_entry.mutable_cluster_dim() = cluster_dim_proto;
}
proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);
proto_entry.set_binary(cache_entry.binary);
}
return proto;
}
absl::Status UpdateDiskKernelCache(
absl::string_view path, const bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache) {
CompilationCacheProto disk_cache;
if (do_append) {
std::string serialized;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
std::string(path), &serialized));
if (!disk_cache.ParseFromString(std::string(serialized))) {
return Internal("Failed to parse serialized CompilationCacheProto.");
}
}
auto entries = disk_cache.mutable_entries();
int stored_kernel_count = 0;
for (const auto& [name, binary] : binaries_to_cache) {
auto it_current = current_cache.entries().find(name);
TF_RET_CHECK(it_current != current_cache.entries().end());
auto [it_disk, inserted] = entries->insert({name, it_current->second});
TF_RET_CHECK(inserted);
TF_RET_CHECK(!binary.empty());
it_disk->second.set_binary(reinterpret_cast<const char*>(binary.data()),
binary.size());
VLOG(5) << "Cached kernel: " << name << ": " << binary.size();
++stored_kernel_count;
}
if (stored_kernel_count > 0) {
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(path),
disk_cache.SerializeAsString()));
VLOG(2) << "Stored " << stored_kernel_count << " / "
<< binaries_to_cache.size() << " kernels in the cache file.";
}
return absl::OkStatus();
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
std::string fingerprint = GetComputationFingerprint(
fused_computation, kernel_arguments, discriminator);
VLOG(4) << "Fingerprint: ";
XLA_VLOG_LINES(4, fingerprint);
return GetWithStatus(std::move(fingerprint), generator);
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
hits_.insert(fingerprint);
auto it = cache_.find(fingerprint);
if (it != cache_.end()) {
return {&it->second, true};
}
absl::StatusOr<Entry> entry = generator();
if (entry.ok()) {
it =
cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;
return {&it->second, false};
}
return {entry.status(), false};
}
}
} | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace gpu {
namespace {
using KernelReuseTest = ::testing::Test;
TEST_F(KernelReuseTest, ExportAndLoadWork) {
KernelReuseCache cache;
EXPECT_TRUE(cache.IsEmpty());
auto [result, was_cached] = cache.GetWithStatus(
"fingerprint", []() { return KernelReuseCache::Entry{}; });
TF_EXPECT_OK(result);
EXPECT_NE(result.value(), nullptr);
EXPECT_FALSE(was_cached);
EXPECT_FALSE(cache.IsEmpty());
const CompilationCacheProto proto = cache.Export();
cache.Clear();
EXPECT_TRUE(cache.IsEmpty());
TF_EXPECT_OK(cache.Load(proto));
EXPECT_FALSE(cache.IsEmpty());
}
TEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {
std::string cache_file_path;
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k1");
    TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, /*do_append=*/false,
                                       proto,
                                       {{.name = "k1", .binary = {5, 6}}}));
}
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k2");
    TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, /*do_append=*/true,
                                       proto,
                                       {{.name = "k2", .binary = {7, 8}}}));
}
std::string serialized;
TF_EXPECT_OK(
tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
EXPECT_EQ(proto.entries_size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0dfb30d-f1d3-464d-99a2-1da73dd33128 | cpp | tensorflow/tensorflow | gpu_spmd_pipeline | third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc | third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc | #include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <optional>
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/spmd/collective_permute_motion.h"
#include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
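// Builds the SPMD partitioning portion of the GPU pipeline: a fixed-point
// "spmd-simplify" sub-pipeline, then either ShardyXLA or (constant splitting,
// optional auto-sharding and) sharding propagation, followed by the
// stateful-RNG-aware SPMD partitioner and collective-permute motion.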
void AddSPMDPasses(
const HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
const se::GpuComputeCapability& compute_capability,
HloPassPipeline& spmd_pipeline,
std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
auto_sharding_func) {
const int64_t num_partitions = hlo_module->config().num_partitions();
CHECK_GE(num_partitions, 1);
HloPassPipeline& spmd_simplify =
spmd_pipeline.AddPass<HloPassFix<HloPassPipeline>>("spmd-simplify");
spmd_simplify.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
compute_capability);
spmd_simplify.AddPass<SortSimplifier>();
spmd_simplify.AddPass<TupleSimplifier>();
spmd_simplify.AddPass<ScatterExpander>(
ScatterExpander::kEliminateSimpleScatters);
spmd_simplify.AddPass<GatherExpander>(
GatherExpander::kEliminateSimpleGathers);
spmd_simplify.AddPass<WhileLoopConstantSinking>();
spmd_simplify.AddPass<WhileLoopSimplifier>();
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
spmd_simplify.AddPass<ReshapeMover>(reshape_mover_options);
spmd_simplify.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
layout_insensitive_algsimp_opts, compute_capability);
spmd_simplify.AddPass<HloConstantFolding>();
spmd_simplify.AddPass<ConditionalSimplifier>();
const HloModuleConfig& config = hlo_module->config();
if (config.use_shardy_partitioner()) {
spmd_pipeline.AddPass<sdy::ShardyXLA>();
} else {
spmd_pipeline.AddPass<HloConstantSplitter>();
spmd_simplify.AddPass<HloDCE>();
if (auto_sharding_func.has_value()) {
(*auto_sharding_func)(spmd_pipeline);
}
spmd_pipeline.AddPass<ShardingPropagation>(
        /*is_spmd=*/true, /*propagate_metadata=*/false,
        config.allow_spmd_sharding_propagation_to_output());
}
spmd_pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
num_partitions, hlo_module->config().replica_count(),
hlo_module->config()
.debug_options()
.xla_gpu_threshold_for_windowed_einsum_mib(),
hlo_module->config()
.debug_options()
.xla_gpu_multi_streamed_windowed_einsum(),
      /*skip_checking_windowed_einsum_users=*/true,
      /*disable_ag_rewrite_for_multiple_consumers=*/true);
spmd_pipeline.AddPass<CollectivePermuteMotion>();
}
}
} | #include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class GpuSpmdPartitioningTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
const char* hlo_module, int64_t num_devices) {
    HloModuleConfig config = GetModuleConfigForTest(
        /*replica_count=*/1, /*num_partitions=*/num_devices);
config.set_num_partitions(num_devices);
config.set_use_shardy_partitioner(UseShardy());
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
HloPassPipeline spmd_pipeline("spmd-partitioner");
se::CudaComputeCapability ampere(8, 0);
AlgebraicSimplifierOptions alg_simplifier_options;
AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline,
std::nullopt);
TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status());
XLA_VLOG_LINES(10, module->ToString());
return module;
}
protected:
bool UseShardy() const { return GetParam(); }
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
return debug_options;
}
};
TEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) {
const char* const kHloModule = R"(
HloModule module,
entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0})
->f32[8,24]{1,0}}
ENTRY main {
%p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]}
%p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]}
ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(kHloModule, 8));
EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0),
ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1}));
EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1),
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0}));
EXPECT_EQ(module->config().entry_computation_layout().result_shape(),
ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0}));
}
std::string TestParamToString(
const ::testing::TestParamInfo<bool>& param_info) {
return param_info.param ? "Shardy" : "GSPMD";
}
INSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest,
::testing::Values(true, false), TestParamToString);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7dde5b77-37ca-4936-8496-28e66a591fb5 | cpp | tensorflow/tensorflow | buffer_comparator | third_party/xla/xla/service/gpu/buffer_comparator.cc | third_party/xla/xla/service/gpu/buffer_comparator_test.cc | #include "xla/service/gpu/buffer_comparator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string_view>
#include <type_traits>
#include <vector>
#include "Eigen/Core"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
template <typename ElementT>
using ComparisonKernelT =
se::TypedKernel<se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>,
float, uint64_t, se::DeviceMemory<uint64_t>>;
struct ComparisonParams {
double relative_tol = 0.1;
bool verbose = true;
const Shape* shape = nullptr;
se::Stream* stream = nullptr;
se::DeviceMemoryBase current{};
se::DeviceMemoryBase expected{};
};
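// Compares two equally sized device buffers element by element on the GPU.
// The comparison kernel accumulates the number of mismatches into a device
// scalar, which is then copied back to the host and tested against zero.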
template <typename ElementT>
static absl::StatusOr<bool> DeviceCompare(std::string_view kernel_name,
void* kernel_symbol,
const ComparisonParams& params) {
se::StreamExecutor* executor = params.stream->parent();
se::DeviceMemoryHandle out(executor, executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(
params.stream->MemZero(out.memory_ptr(), sizeof(uint64_t)));
if (params.current.size() != params.expected.size()) {
return Internal("Mismatched buffer size: %d bytes vs. %d bytes",
params.current.size(), params.expected.size());
}
se::DeviceMemory<ElementT> current_typed(params.current);
se::DeviceMemory<ElementT> expected_typed(params.expected);
uint64_t buffer_size = current_typed.ElementCount();
TF_ASSIGN_OR_RETURN(
ComparisonKernelT<ElementT> comparison_kernel,
(se::TypedKernelFactory<
se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>, float,
uint64_t, se::DeviceMemory<uint64_t>>::Create(executor, kernel_name,
kernel_symbol)));
const se::DeviceDescription& gpu_device_info =
executor->GetDeviceDescription();
LaunchDimensions dim =
CalculateLaunchDimensions(*params.shape, gpu_device_info);
se::DeviceMemory<uint64_t> as_uint64(out.memory());
TF_RETURN_IF_ERROR(params.stream->ThenLaunch(
dim.thread_counts_per_block(), dim.block_counts(), comparison_kernel,
current_typed, expected_typed, static_cast<float>(params.relative_tol),
buffer_size, as_uint64));
  uint64_t result = -1;  // Sentinel; overwritten by the device-to-host copy.
CHECK_EQ(out.memory().size(), sizeof(result));
TF_RETURN_IF_ERROR(
params.stream->Memcpy(&result, out.memory(), sizeof(result)));
TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone());
return result == 0;
}
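// Host-side reference comparison: copies both buffers to the host and checks
// them element by element with the same relative-error formula, logging up to
// the first ten mismatches. fp16 values are clamped to just above the finite
// range (65505), so an infinity is allowed to match the largest finite fp16
// value; this presumably mirrors what the device kernel does.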
template <typename ElementType, typename ComparisonType>
static absl::StatusOr<bool> HostCompare(const ComparisonParams& params) {
int64_t n = params.current.size() / sizeof(ElementType);
std::vector<ElementType> host_current(n), host_expected(n);
TF_RETURN_IF_ERROR(params.stream->Memcpy(host_current.data(), params.current,
params.current.size()));
TF_RETURN_IF_ERROR(params.stream->Memcpy(
host_expected.data(), params.expected, params.expected.size()));
TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone());
const auto canonicalize = [](ComparisonType a) -> ComparisonType {
if (std::is_same<ElementType, Eigen::half>::value && a) {
constexpr ComparisonType kMaxFp16Value = 65505;
if (std::isnan(a)) {
return a;
}
return std::max(-kMaxFp16Value, std::min(a, kMaxFp16Value));
}
return a;
};
int differences_seen = 0;
for (int64_t i = 0; i < n && differences_seen < 10; ++i) {
auto current_value = static_cast<ComparisonType>(host_current[i]);
auto expected_value = static_cast<ComparisonType>(host_expected[i]);
ComparisonType current_value_canonical = canonicalize(current_value);
ComparisonType expected_value_canonical = canonicalize(expected_value);
if (std::isnan(current_value_canonical) &&
std::isnan(expected_value_canonical)) {
continue;
}
if (std::isinf(current_value_canonical) &&
std::isinf(expected_value_canonical) &&
current_value_canonical == expected_value_canonical) {
continue;
}
if (std::isfinite(current_value_canonical) !=
std::isfinite(expected_value_canonical) ||
!(std::abs(current_value_canonical - expected_value_canonical) /
(std::max(std::abs(current_value_canonical),
std::abs(expected_value_canonical)) +
1) <
params.relative_tol)) {
if (!params.verbose) return false;
++differences_seen;
LOG(ERROR) << "Difference at " << i << ": " << current_value
<< ", expected " << expected_value;
}
}
return differences_seen == 0;
}
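// Runs the device comparison first and, only if it reports a mismatch,
// re-checks on the host. A disagreement between the two paths indicates a bug
// in the comparator itself and aborts via CHECK.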
template <typename ElementT, typename ComparisonT>
static absl::StatusOr<bool> CompareEqualParameterized(
std::string_view kernel_name, void* kernel_symbol,
const ComparisonParams& params) {
XLA_SCOPED_LOGGING_TIMER("BufferComparator::CompareEqual");
TF_ASSIGN_OR_RETURN(
bool result, DeviceCompare<ElementT>(kernel_name, kernel_symbol, params));
if (result) {
return true;
}
TF_ASSIGN_OR_RETURN(bool host_return,
(HostCompare<ElementT, ComparisonT>(params)));
CHECK_EQ(host_return, result)
<< "Host comparison succeeded even though GPU comparison failed.";
return false;
}
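// Dispatches to the device kernel matching the buffer's element type.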
absl::StatusOr<bool> BufferComparator::CompareEqual(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const {
ComparisonParams params{relative_tol_, verbose_, &shape_,
stream, current, expected};
switch (shape_.element_type()) {
#if GOOGLE_CUDA
case xla::F8E4M3FN:
return CompareEqualParameterized<tsl::float8_e4m3fn, float>(
"fp8_e4m3fn_comparison", buffer_comparator::fp8_e4m3fn_comparison(),
params);
case xla::F8E5M2:
return CompareEqualParameterized<tsl::float8_e5m2, float>(
"fp8_e5m2_comparison", buffer_comparator::fp8_e5m2_comparison(),
params);
#endif
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200
case xla::F8E4M3FNUZ:
return CompareEqualParameterized<tsl::float8_e4m3fnuz, float>(
"fp8_e4m3fnuz_comparison",
buffer_comparator::fp8_e4m3fnuz_comparison(), params);
case xla::F8E5M2FNUZ:
return CompareEqualParameterized<tsl::float8_e5m2fnuz, float>(
"fp8_e5m2fnuz_comparison",
buffer_comparator::fp8_e5m2fnuz_comparison(), params);
#endif
case xla::F16:
return CompareEqualParameterized<Eigen::half, float>(
"fp16_comparison", buffer_comparator::fp16_comparison(), params);
case xla::BF16:
return CompareEqualParameterized<Eigen::bfloat16, float>(
"bf16_comparison", buffer_comparator::bf16_comparison(), params);
case xla::F32:
return CompareEqualParameterized<float, float>(
"fp32_comparison", buffer_comparator::fp32_comparison(), params);
case xla::F64:
return CompareEqualParameterized<double, double>(
"fp64_comparison", buffer_comparator::fp64_comparison(), params);
case xla::S8:
return CompareEqualParameterized<int8_t, float>(
"int8_comparison", buffer_comparator::int8_comparison(), params);
case xla::S32:
return CompareEqualParameterized<int32_t, float>(
"int32_comparison", buffer_comparator::int32_comparison(), params);
default:
return Unimplemented("Unimplemented element type");
}
}
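// Complex types are compared as their underlying real element type, with the
// leading dimension doubled to cover the real and imaginary parts.
//
// Typical usage (a sketch, mirroring the unit test below):
//   BufferComparator comparator(shape, /*tolerance=*/0.1);
//   TF_ASSIGN_OR_RETURN(
//       bool equal, comparator.CompareEqual(stream, current, expected));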
BufferComparator::BufferComparator(const Shape& shape, double tolerance,
bool verbose)
: shape_(shape), relative_tol_(tolerance), verbose_(verbose) {
auto double_dim_size = [&]() {
int64_t prev_zero_dim_size = shape_.dimensions(0);
shape_.set_dimensions(0, prev_zero_dim_size * 2);
};
if (shape_.element_type() == PrimitiveType::C64) {
shape_.set_element_type(PrimitiveType::F32);
double_dim_size();
} else if (shape_.element_type() == PrimitiveType::C128) {
shape_.set_element_type(PrimitiveType::F64);
double_dim_size();
}
}
}
} | #include "xla/service/gpu/buffer_comparator.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <limits>
#include <vector>
#include "xla/primitive_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/types.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
constexpr double kDefaultTolerance = 0.1;
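// Fixture that allocates two device buffers, uploads the given host vectors,
// and runs BufferComparator::CompareEqual on them.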
class BufferComparatorTest : public testing::Test {
protected:
BufferComparatorTest()
#if GOOGLE_CUDA
: platform_(se::PlatformManager::PlatformWithName("CUDA").value()),
#elif TENSORFLOW_USE_ROCM
: platform_(se::PlatformManager::PlatformWithName("ROCM").value()),
#endif
stream_exec_(platform_->ExecutorForDevice(0).value()) {
}
template <typename ElementType>
bool CompareEqualBuffers(const std::vector<ElementType>& current,
const std::vector<ElementType>& expected,
double tolerance) {
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle current_buffer(
stream_exec_, stream_exec_->AllocateArray<ElementType>(current.size()));
se::DeviceMemoryHandle expected_buffer(
stream_exec_,
stream_exec_->AllocateArray<ElementType>(expected.size()));
TF_CHECK_OK(stream->Memcpy(current_buffer.memory_ptr(), current.data(),
current_buffer.memory().size()));
TF_CHECK_OK(stream->Memcpy(expected_buffer.memory_ptr(), expected.data(),
expected_buffer.memory().size()));
TF_CHECK_OK(stream->BlockHostUntilDone());
BufferComparator comparator(
ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<ElementType>(),
{static_cast<int64_t>(current.size())}),
tolerance);
return comparator
.CompareEqual(stream.get(), current_buffer.memory(),
expected_buffer.memory())
.value();
}
template <typename ElementType>
bool CompareEqualFloatBuffers(const std::vector<float>& lhs_float,
const std::vector<float>& rhs_float,
double tolerance = kDefaultTolerance) {
std::vector<ElementType> lhs(lhs_float.begin(), lhs_float.end());
std::vector<ElementType> rhs(rhs_float.begin(), rhs_float.end());
return CompareEqualBuffers(lhs, rhs, tolerance);
}
template <typename ElementType>
bool CompareEqualComplex(const std::vector<std::complex<ElementType>>& lhs,
const std::vector<std::complex<ElementType>>& rhs) {
return CompareEqualBuffers<std::complex<ElementType>>(lhs, rhs,
kDefaultTolerance);
}
se::Platform* platform_;
se::StreamExecutor* stream_exec_;
};
TEST_F(BufferComparatorTest, TestComplex) {
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_TRUE(CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_TRUE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 6}, {2, 3}}));
EXPECT_TRUE(CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_FALSE(
CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 7}}));
}
TEST_F(BufferComparatorTest, TestNaNs) {
EXPECT_TRUE(
CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")},
{std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({std::nanf("")}, {1.}));
}
TEST_F(BufferComparatorTest, TestInfs) {
const auto inf = std::numeric_limits<float>::infinity();
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {65504}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-20}));
#if GOOGLE_CUDA
EXPECT_TRUE(
CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-20}));
EXPECT_FALSE(
CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-20}));
#endif
}
TEST_F(BufferComparatorTest, TestNumbers) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {101}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {120}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({90}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({0}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({90}, {100}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {90}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({-128}, {127}));
#if GOOGLE_CUDA
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {30.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({11}, {12}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({12}, {11}));
#endif
const double tol = 0.001;
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1}, tol));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {0.901}, tol));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({10}, {10.1}, tol));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {10.01}, tol));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {101}, tol));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {20.1}, tol));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.01}, tol));
}
TEST_F(BufferComparatorTest, TestMultiple) {
{
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<float>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<double>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({20, 30, 40, 50, 60},
{21, 31, 41, 51, 61}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#if GOOGLE_CUDA
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#endif
}
TEST_F(BufferComparatorTest, BF16) {
const int element_count = 3123;
int64_t rng_state = 0;
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle lhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, lhs.memory());
se::DeviceMemoryHandle rhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, rhs.memory());
BufferComparator comparator(ShapeUtil::MakeShape(BF16, {element_count}));
EXPECT_FALSE(comparator.CompareEqual(stream.get(), lhs.memory(), rhs.memory())
.value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5befe34d-a742-4dd8-8eb2-5fbb362c43ee | cpp | tensorflow/tensorflow | gpu_fusible | third_party/xla/xla/service/gpu/gpu_fusible.cc | third_party/xla/xla/service/gpu/gpu_fusible_test.cc | #include "xla/service/gpu/gpu_fusible.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
bool HasAnyTiledTransposeRoot(const HloComputation& computation) {
return absl::c_any_of(GetFusionRoots(computation),
[&](const HloInstruction* instr) {
return GetDescriptionForTiledTransposeEmitter(
FindNonTrivialHero(*instr))
.has_value();
});
}
const Shape& GetElementShape(const HloFusionAnalysis& analysis) {
const Shape* shape = &analysis.fusion_root(0).shape();
while (shape->IsTuple()) {
shape = &shape->tuple_shapes(0);
}
return *shape;
}
int ComputeMaxUnrollFactor(int64_t num_elements) {
constexpr int kMaxUnrollFactor = 4;
for (int i = kMaxUnrollFactor; i > 1; i /= 2) {
if (num_elements % i == 0) {
return i;
}
}
return 1;
}
}
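// Returns true if fusing `instr` into a consumer would make it read some of
// its input elements more than once per output (e.g. broadcasts, gathers, and
// reduce-windows whose windows overlap).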
bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
if (instr.opcode() == HloOpcode::kGather ||
instr.opcode() == HloOpcode::kBroadcast) {
return ShapeUtil::ElementsIn(instr.shape()) >
ShapeUtil::ElementsIn(instr.operand(0)->shape());
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
for (const auto& dim : instr.window().dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
}
return false;
}
bool IsExpensiveToRepeat(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
constexpr int kMaxInputsPerOutput = 10;
if (instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) {
int64_t reduction_ratio = ShapeUtil::ElementsIn(instr.operand(0)->shape()) /
ShapeUtil::ElementsIn(instr.shape());
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
int64_t reduction_ratio = 1;
for (const auto& dim : instr.window().dimensions())
reduction_ratio *= dim.size();
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
return false;
}
bool IsPhysicallyTransposing(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
for (const HloInstruction* fused_instr : instr.fused_instructions()) {
if (IsPhysicallyTransposing(*fused_instr)) {
return true;
}
}
}
return instr.opcode() == HloOpcode::kCopy ||
(instr.opcode() == HloOpcode::kTranspose &&
!ShapeUtil::TransposeIsBitcast(instr.operand(0)->shape(),
instr.shape(), instr.dimensions()));
}
namespace {
std::pair<int64_t, int64_t> MostMinorNonTrivialDimension(const Shape& shape) {
int64_t position_of_first_non_trivial_dim = 0;
for (int64_t dim : shape.layout().minor_to_major()) {
if (shape.dimensions()[dim] > 1) {
return {dim, position_of_first_non_trivial_dim};
}
++position_of_first_non_trivial_dim;
}
return {-1, position_of_first_non_trivial_dim};
}
}
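// Returns true if the instruction (or any instruction inside a fusion)
// shuffles data away from the physically most-minor non-trivial dimension,
// which likely defeats coalesced memory accesses unless a tiled emitter is
// used.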
bool TransposesMinorDimension(const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kFusion:
return absl::c_any_of(instr->fused_instructions(),
TransposesMinorDimension);
case HloOpcode::kCopy: {
int64_t first_non_trivial_operand_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).first;
int64_t first_non_trivial_output_dim =
MostMinorNonTrivialDimension(instr->shape()).first;
return first_non_trivial_operand_dim != first_non_trivial_output_dim;
}
case HloOpcode::kTranspose: {
auto position_in_minor_to_major = InversePermutation(
instr->operand(0)->shape().layout().minor_to_major());
int64_t position_of_first_non_trivial_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).second;
for (int64_t output_dim : instr->shape().layout().minor_to_major()) {
if (instr->shape().dimensions()[output_dim] == 1) {
continue;
}
int64_t operand_dim = instr->dimensions().at(output_dim);
return position_in_minor_to_major[operand_dim] >
position_of_first_non_trivial_dim;
}
return false;
}
default:
return false;
}
}
bool IsReduceInputFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
absl::c_any_of(GetFusionRoots(*instr.called_computations()[0]),
[](const HloInstruction* root) {
return IsRealReductionHero(*root,
FindNonTrivialHero(*root));
});
}
bool IsInputFusibleReduction(const HloInstruction& instr) {
return IsReduceInputFusion(instr) ||
IsReductionFromOrToContiguousDimensions(instr);
}
bool IsNestableVariadicReduction(const HloInstruction& instr) {
return instr.shape().IsTuple() &&
((instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kLoop &&
instr.fused_expression_root()->opcode() == HloOpcode::kReduce));
}
bool IsInputFusibleTranspose(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kBitcast || instr.IsCustomFusion()) {
return false;
}
if (instr.opcode() == HloOpcode::kFusion) {
return HasAnyTiledTransposeRoot(*instr.fused_instructions_computation());
}
return GetDescriptionForTiledTransposeEmitter(instr).has_value();
}
const HloInstruction* GetRealHeroForMultiOutputFusion(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return &instr;
}
auto fused_expression_root = instr.fused_expression_root();
if (!instr.IsMultiOutputFusion()) {
const auto& hero = FindNonTrivialHero(*fused_expression_root);
if (IsRealReductionHero(*fused_expression_root, hero) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return &hero;
}
return fused_expression_root;
}
for (auto* inst : fused_expression_root->mutable_operands()) {
const auto& hero = FindNonTrivialHero(*inst);
if (IsRealReductionHero(*inst, hero) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return &hero;
}
}
return fused_expression_root->operands()[0];
}
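// Checks whether two fusion heroes can coexist in one fusion: tiled
// reductions must be MOF-compatible, tiled transposes must be equivalent,
// reduction and transpose heroes cannot be mixed, and a tiled transpose must
// reach the other fusion's root only through intermediate
// (element-preserving) ops, or it would lose its tiling.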
FusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1,
const HloInstruction* hero2) {
auto hero1_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero1);
auto tiled_transpose_hero1 = GetDescriptionForTiledTransposeEmitter(*hero1);
bool hero1_is_unnested_transpose = tiled_transpose_hero1.has_value();
bool hero2_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero2);
auto tiled_transpose_hero2 = GetDescriptionForTiledTransposeEmitter(*hero2);
bool hero2_is_unnested_transpose = tiled_transpose_hero2.has_value();
if (hero1_is_unnested_reduce && hero2_is_unnested_reduce &&
!AreReductionsMultiOutputFusionCompatible(hero2, hero1)) {
return FusionDecision::Forbid("tiled reductions with different shapes");
} else if (hero1_is_unnested_transpose && hero2_is_unnested_transpose &&
!tiled_transpose_hero1->IsEquivalent(*tiled_transpose_hero2)) {
return FusionDecision::Forbid("tiled transposes with different shapes");
} else if ((hero1_is_unnested_transpose && hero2_is_unnested_reduce) ||
(hero1_is_unnested_reduce && hero2_is_unnested_transpose)) {
return FusionDecision::Forbid("MOF-fusion of a transpose and a reduction");
}
if (hero1_is_unnested_transpose || hero2_is_unnested_transpose) {
auto check_path_of_intermediate_ops = [](HloInstruction* param) {
if (param->user_count() != 1) {
return false;
}
HloInstruction* hlo = param->users()[0];
while (hlo->user_count() > 0) {
if (!IsIntermediate(hlo)) {
return false;
}
hlo = hlo->users()[0];
}
return true;
};
HloInstruction* fusion1 = hero1->parent()->FusionInstruction();
HloInstruction* fusion2 = hero2->parent()->FusionInstruction();
if (fusion1 != nullptr && fusion2 != nullptr) {
if (hero1_is_unnested_transpose && fusion2->IsUserOf(fusion1)) {
int64_t operand_idx = fusion2->operand_index(fusion1);
auto hlo = fusion2->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return FusionDecision::Forbid("tiled transpose would become untiled");
}
} else if (hero2_is_unnested_transpose && fusion1->IsUserOf(fusion2)) {
int64_t operand_idx = fusion1->operand_index(fusion2);
auto hlo = fusion1->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return FusionDecision::Forbid("tiled transpose would become untiled");
}
}
}
}
return FusionDecision::Allow();
}
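// Shape compatibility for multi-output fusion: the heroes' "loop shapes"
// (reduction and transpose heroes contribute their input shape) must be equal
// up to element type, or differ only by a reshape/transpose bitcast.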
FusionDecision ShapesCompatibleForMultiOutputFusion(
const HloInstruction& instr1, const HloInstruction& instr2) {
auto get_loop_shape = [&](const HloInstruction* element_instr) {
const auto& hero = element_instr->parent()->IsFusionComputation()
? FindNonTrivialHero(*element_instr)
: *element_instr;
if (IsReductionFromOrToContiguousDimensions(*element_instr) ||
GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
return hero.operand(0)->shape();
}
return element_instr->shape();
};
const HloInstruction* hero1 = GetRealHeroForMultiOutputFusion(instr1);
const HloInstruction* hero2 = GetRealHeroForMultiOutputFusion(instr2);
if (auto compatible = FusionHeroesAreCompatible(hero1, hero2); !compatible) {
return compatible;
}
const Shape& l1 = get_loop_shape(hero1);
const Shape& l2 = get_loop_shape(hero2);
bool accept_unequal_shape = !l1.IsTuple() && !l2.IsTuple();
if (!ShapeUtil::EqualIgnoringElementType(l1, l2) &&
(!accept_unequal_shape ||
!ShapeUtil::IsReshapeOrTransposeBitcast(l1, l2,
           /*ignore_element_type=*/true))) {
return FusionDecision::Forbid("different loop shapes");
}
return FusionDecision::Allow();
}
bool IsInputFusibleScatter(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kScatter ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput &&
instr.fused_expression_root()->opcode() == HloOpcode::kScatter)) {
return true;
}
return false;
}
bool IsInputFusible(const HloInstruction& instr) {
return instr.IsFusible() &&
(IsInputFusibleReduction(instr) || IsInputFusibleScatter(instr) ||
IsInputFusibleTranspose(instr));
}
bool IsUniversallyLoopFusible(const HloInstruction& instr) {
if (instr.IsElementwise() && instr.operand_count() > 0 &&
instr.opcode() != HloOpcode::kCopy) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kCopy:
return !GetDescriptionForTiledTransposeEmitter(instr).has_value();
case HloOpcode::kFusion:
return instr.fusion_kind() == HloInstruction::FusionKind::kLoop;
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
bool IsLoopFusibleAsConsumer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
if (instr.opcode() == HloOpcode::kBitcast) return false;
if (instr.opcode() == HloOpcode::kReduce) return true;
if (!IsInputFusible(instr) && instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput) {
return true;
}
return IsUniversallyLoopFusible(instr);
}
bool IsLoopFusibleAsProducer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
switch (instr.opcode()) {
case HloOpcode::kIota:
case HloOpcode::kConstant:
return true;
case HloOpcode::kReduce:
return !instr.shape().IsTuple();
default:
return IsUniversallyLoopFusible(instr);
}
}
static bool AllSatisfy(const HloInstruction& instr,
const HloPredicate& predicate) {
if (instr.opcode() != HloOpcode::kFusion) {
return predicate(&instr);
}
return absl::c_all_of(
instr.fused_instructions(), [&](const HloInstruction* i) {
return i->opcode() == HloOpcode::kParameter || predicate(i);
});
}
FusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,
const HloInstruction& consumer) {
if (IsInputFusibleScatter(producer)) {
return FusionDecision::Forbid("do not fuse into the output of scatter");
}
if (!IsInputFusibleScatter(consumer)) {
return FusionDecision::Allow();
}
const HloInstruction* inplace_operand;
if (consumer.opcode() == HloOpcode::kFusion) {
const HloInstruction* scatter = consumer.fused_expression_root();
CHECK_EQ(scatter->opcode(), HloOpcode::kScatter);
CHECK_EQ(scatter->operand(0)->opcode(), HloOpcode::kParameter);
inplace_operand = consumer.operand(scatter->operand(0)->parameter_number());
} else {
inplace_operand = consumer.operand(0);
}
if (inplace_operand == &producer) {
return FusionDecision::Forbid(
"do not fuse into the in-place operand of scatter");
}
if (absl::c_linear_search(producer.operands(), inplace_operand)) {
return FusionDecision::Forbid(
"Producer uses the in-place operand of a scatter");
}
return FusionDecision::Allow();
}
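// Legality check for producer-consumer fusion. Reduction producers may only
// be fused into single-user, intermediate-only epilogues of race-free
// reductions; scatter in-place constraints and in-place-op aliasing rules
// must also hold.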
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
const HloInstruction& consumer) {
if (!IsLoopFusibleAsProducer(producer) &&
!IsInputFusibleTranspose(producer)) {
return FusionDecision::Forbid("the producer is not loop-fusible");
}
if (IsInputFusibleReduction(producer)) {
if (!producer.GetModule()
->config()
.debug_options()
.xla_gpu_enable_reduction_epilogue_fusion()) {
return FusionDecision::Forbid(
"Reduction epilogue fusion is not enabled.");
}
const HloInstruction& reduce_hero =
producer.opcode() == HloOpcode::kFusion
? FindNonTrivialHero(*producer.fused_expression_root())
: producer;
if (!ReductionIsRaceFree(
reduce_hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(reduce_hero))) {
return FusionDecision::Forbid(
"Reduction output fusion only works for race free reductions");
}
if (!AllSatisfy(consumer, [](const HloInstruction* hlo) {
          return IsIntermediate(hlo, /*allowed_operand_count=*/1);
})) {
return FusionDecision::Forbid(
"Reductions from/to continuous dims epilogue not fusible");
}
if (producer.user_count() > 1) {
return FusionDecision::Forbid(
"reduction output fusion only works for single user");
}
}
if (auto can_fuse = CanEmitInputFusedScatter(producer, consumer); !can_fuse) {
return can_fuse;
}
if (!IsInputFusible(consumer) && !IsLoopFusibleAsConsumer(consumer)) {
return FusionDecision::Forbid(
"the consumer is not input-fusible and not loop-fusible");
}
if (producer.IsMultiOutputFusion()) {
return FusionDecision::Forbid(
"the producer is not fusible as it is a multi-output fusion");
}
if (producer.opcode() == HloOpcode::kConstant &&
(!ShapeUtil::IsEffectiveScalar(producer.shape()) ||
consumer.opcode() != HloOpcode::kFusion)) {
return FusionDecision::Forbid("not fusing constant");
}
return InstructionFusion::ShouldFuseInPlaceOp(&producer, &consumer);
}
FusionDecision IsProducerMultiOutputFusible(const HloInstruction& producer) {
if (producer.IsMultiOutputFusion()) {
return FusionDecision::Forbid("Producer is a multi-output fusion");
}
if (!HloDataflowAnalysis::GetInPlaceInputOutputPairs(&producer).empty()) {
return FusionDecision::Forbid("In-place operations are present");
}
if (!IsLoopFusibleAsProducer(producer)) {
return FusionDecision::Forbid("producer is not loop-fusible");
}
if (IsPhysicallyTransposing(producer)) {
return FusionDecision::Forbid("producer is physically transposing");
}
return FusionDecision::Allow();
}
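// Estimates the shared memory used by `instr`: roughly a warp-sized scratch
// buffer per output for tiled row reductions, and a 32x33 tile per output for
// column reductions and tiled transposes (the extra column presumably avoids
// bank conflicts).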
static int64_t SharedMemoryUsageNoCache(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
int64_t sum = 0;
for (const HloInstruction* hlo :
instr.fused_instructions_computation()->instructions()) {
sum += SharedMemoryUsageNoCache(*hlo);
}
return sum;
} else if (instr.opcode() == HloOpcode::kReduce &&
IsReductionFromOrToContiguousDimensions(instr)) {
ReductionDimensions reduction_info =
GetReductionKindAndContiguousComponents(instr);
int64_t primitive_size = ShapeUtil::ByteSizeOfPrimitiveType(
instr.operand(0)->shape().element_type());
int num_variadic =
instr.shape().IsTuple() ? instr.shape().tuple_shapes_size() : 1;
if (reduction_info.is_row_reduction) {
return 32 * primitive_size * num_variadic;
} else {
return 4 * 32 * 33 * primitive_size * num_variadic;
}
} else if (auto tr = GetDescriptionForTiledTransposeEmitter(instr)) {
int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(instr.shape().element_type());
int64_t bytes_required = 32 * 33 * primitive_size;
if (tr->permutation.back() == tr->permutation.size() - 1) {
bytes_required *= tr->dimensions.back();
}
return bytes_required;
}
return 0;
}
int64_t FusionInfoCache::GetSharedMemoryUsage(const HloInstruction& instr) {
{
absl::MutexLock lock(&mutex_);
auto it = shared_memory_usage_.find(&instr);
if (it != shared_memory_usage_.end()) {
return it->second;
}
}
int64_t shared_memory_usage = SharedMemoryUsageNoCache(instr);
absl::MutexLock lock(&mutex_);
shared_memory_usage_.emplace(&instr, shared_memory_usage);
return shared_memory_usage;
}
int64_t SharedMemoryUsage(const HloInstruction& instr, FusionInfoCache* cache) {
if (!cache) {
return SharedMemoryUsageNoCache(instr);
}
return cache->GetSharedMemoryUsage(instr);
}
constexpr int64_t kMaxUnnestedReductionOutputsPerFusion = 8;
static int64_t NumUnnestedReductionsNoCache(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kReduce &&
IsReductionFromOrToContiguousDimensions(instr)) {
return 1;
}
if (instr.opcode() == HloOpcode::kFusion) {
int64_t sum = 0;
for (const HloInstruction* hlo :
instr.fused_instructions_computation()->instructions()) {
sum += NumUnnestedReductionsNoCache(*hlo);
}
return sum;
}
return 0;
}
int64_t FusionInfoCache::GetNumUnnestedReductions(const HloInstruction& instr) {
{
absl::MutexLock lock(&mutex_);
auto it = num_unnested_reductions_.find(&instr);
if (it != num_unnested_reductions_.end()) {
return it->second;
}
}
int64_t num_unnested_reductions = NumUnnestedReductionsNoCache(instr);
absl::MutexLock lock(&mutex_);
num_unnested_reductions_.emplace(&instr, num_unnested_reductions);
return num_unnested_reductions;
}
static int64_t NumUnnestedReductions(const HloInstruction& instr,
FusionInfoCache* cache) {
if (!cache) {
return NumUnnestedReductionsNoCache(instr);
}
return cache->GetNumUnnestedReductions(instr);
}
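// Resource-budget check for fusing `instr1` and `instr2`: combined shared
// memory must fit in a block, at most kMaxUnnestedReductionOutputsPerFusion
// unnested reductions are allowed, and the total operand/output count must
// stay under MaxOperandsAndOutputsPerFusion().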
FusionDecision FusionFitsInBudget(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
bool is_consumer_producer_fusion,
                                  FusionInfoCache* cache /*=nullptr*/) {
if (SharedMemoryUsage(instr1, cache) + SharedMemoryUsage(instr2, cache) >
device_info.shared_memory_per_block()) {
return FusionDecision::Forbid(
"shared memory usage would be over the budget of ")
<< device_info.shared_memory_per_block() << "B";
}
if (NumUnnestedReductions(instr1, cache) +
NumUnnestedReductions(instr2, cache) >
kMaxUnnestedReductionOutputsPerFusion) {
return FusionDecision::Forbid("over ")
<< kMaxUnnestedReductionOutputsPerFusion
<< " unnested reductions in fusion";
}
int64_t num_output_buffers = ShapeUtil::SubshapeCount(instr1.shape()) +
ShapeUtil::SubshapeCount(instr2.shape());
if (instr1.operand_count() + instr2.operand_count() - 1 +
num_output_buffers <=
MaxOperandsAndOutputsPerFusion()) {
return FusionDecision::Allow();
} else {
VLOG(5) << "Operand count of " << "(" << instr1.ToString()
<< " ) = " << instr1.operand_count() << " and ( "
<< instr2.ToString() << " ) = " << instr2.operand_count()
<< " and num_output_buffers = " << num_output_buffers
<< " is bigger than the bound of "
<< MaxOperandsAndOutputsPerFusion();
}
absl::flat_hash_set<const HloInstruction*> operands(instr1.operands().begin(),
instr1.operands().end());
operands.insert(instr2.operands().begin(), instr2.operands().end());
operands.erase(&instr1);
operands.erase(&instr2);
if (is_consumer_producer_fusion &&
operands.size() <= instr1.operands().size()) {
return FusionDecision::Allow();
}
if (operands.size() + num_output_buffers > MaxOperandsAndOutputsPerFusion()) {
return FusionDecision::Forbid(
"Number of operands and output buffers is larger than allowed budget "
"per fusion");
}
return FusionDecision::Allow();
}
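// Returns true if fusing would duplicate an expensive-to-repeat producer into
// a consumer that reads the corresponding parameter's elements multiple
// times, found by a DFS over the consumer's fused users of that parameter.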
bool CreatesHeavyComputation(const HloInstruction& producer,
const HloInstruction& consumer) {
auto producer_is_heavy = [&](const HloInstruction& instr) {
if (producer.opcode() != HloOpcode::kFusion) {
return IsExpensiveToRepeat(producer);
}
for (const auto& instr : producer.fused_instructions()) {
if (IsExpensiveToRepeat(*instr)) {
return true;
}
}
return false;
};
if (!producer_is_heavy(producer)) {
return false;
}
if (consumer.opcode() != HloOpcode::kFusion) {
return IfFusedReadsElementsMultipleTimes(consumer);
}
for (const HloInstruction* operand : consumer.operands()) {
if (operand != &producer) {
continue;
}
const HloInstruction* root =
consumer.fused_instructions_computation()->parameter_instruction(
consumer.operand_index(operand));
std::stack<const HloInstruction*> dfs;
dfs.push(root);
absl::flat_hash_set<const HloInstruction*> visited;
while (!dfs.empty()) {
const HloInstruction* cur = dfs.top();
dfs.pop();
if (!visited.insert(cur).second) {
continue;
}
if (IfFusedReadsElementsMultipleTimes(*cur)) {
return true;
}
for (const auto& user : cur->users()) {
if (visited.contains(user)) {
continue;
}
dfs.push(user);
}
}
}
return false;
}
bool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr) {
return instr.IsFusible() && !instr.IsCustomFusion() &&
(IsInputFusibleReduction(instr) || IsInputFusibleTranspose(instr) ||
instr.IsLoopFusion() ||
instr.IsElementwise());
}
HloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,
const HloInstruction& consumer) {
return (IsInputFusible(consumer) || IsInputFusible(producer))
? HloInstruction::FusionKind::kInput
: HloInstruction::FusionKind::kLoop;
}
bool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,
const HloInstruction& consumer) {
return absl::c_all_of(instr.users(), [&](const HloInstruction* user) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
return IsConsumerTheOnlyNonRootUser(*user, consumer);
}
return user == &consumer || user == user->parent()->root_instruction();
});
}
size_t GetInstrCountOfFusible(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion ? instr.fused_instruction_count()
: 1;
}
absl::InlinedVector<const HloInstruction*, 2> GetOutputsOfFusible(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return {&instr};
}
HloInstruction* root = instr.fused_expression_root();
if (root->opcode() != HloOpcode::kTuple) {
return {root};
} else {
auto v = root->operands();
return absl::InlinedVector<const HloInstruction*, 2>(v.begin(), v.end());
}
}
size_t GetOutputSizeOfFusible(const HloInstruction& instr) {
if (!instr.IsMultiOutputFusion()) {
return 1;
}
const HloInstruction* root = instr.fused_expression_root();
return ShapeUtil::TupleElementCount(root->shape());
}
static void GetFusionRootsRec(const HloInstruction* root,
std::vector<const HloInstruction*>& out) {
if (root->opcode() == HloOpcode::kGetTupleElement &&
root->operand(0)->opcode() == HloOpcode::kTuple) {
return GetFusionRootsRec(root->operand(0)->operand(root->tuple_index()),
out);
} else if (root->opcode() == HloOpcode::kGetTupleElement) {
out.push_back(root->operand(0));
} else if (root->opcode() == HloOpcode::kTuple) {
for (int i = 0; i < root->operand_count(); i++) {
GetFusionRootsRec(root->operand(i), out);
}
} else {
out.push_back(root);
}
}
std::vector<const HloInstruction*> GetFusionRoots(
const HloComputation& computation) {
std::vector<const HloInstruction*> out;
GetFusionRootsRec(computation.root_instruction(), out);
return out;
}
bool IsGenericTritonFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kCustom &&
instr.backend_config<GpuBackendConfig>().ok() &&
instr.backend_config<GpuBackendConfig>()
->fusion_backend_config()
.kind() == kTritonFusionKind;
}
bool MayPreventVectorization(const HloFusionAdaptor& fusion) {
static constexpr int kMaxConcatArgumentsForUnrolling = 10;
return HloAnyOf(fusion, [&](auto node) {
switch (node.opcode()) {
case HloOpcode::kReduceWindow:
case HloOpcode::kSort:
case HloOpcode::kDot:
case HloOpcode::kSin:
case HloOpcode::kCos:
case HloOpcode::kTan:
case HloOpcode::kPower:
case HloOpcode::kAtan2:
return true;
case HloOpcode::kConcatenate:
return node.instruction().operand_count() >
kMaxConcatArgumentsForUnrolling;
case HloOpcode::kReduce:
return node.instruction().shape().tuple_shapes_size() > 1;
default:
return false;
}
});
}
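// Returns the computations worth running fusion on: fusion bodies are
// skipped, as is any computation called as a subcomputation of a non-fusible
// caller (e.g. a reduction combiner or sort comparator).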
std::vector<HloComputation*> GetFusibleComputations(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto result = module.MakeComputationPostOrder(execution_threads);
absl::flat_hash_set<const HloComputation*> computations_not_to_fuse;
for (const auto* computation : result) {
for (const auto* instr : computation->instructions()) {
if (HloInstruction::MightHaveCalledComputations(instr->opcode()) &&
instr->opcode() != HloOpcode::kWhile &&
instr->opcode() != HloOpcode::kConditional &&
instr->opcode() != HloOpcode::kFusion) {
for (auto* called : instr->called_computations()) {
computations_not_to_fuse.insert(called);
}
}
}
}
result.erase(
std::remove_if(result.begin(), result.end(),
[&](HloComputation* computation) {
return computation->IsFusionComputation() ||
computations_not_to_fuse.contains(computation);
}),
result.end());
return result;
}
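// Picks the unroll factor for a loop fusion: up to 4 when there are enough
// elements to saturate the device and nothing in the fusion prevents
// vectorization, then raised so each thread still writes at least one byte of
// the smallest output type.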
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis) {
return ComputeLoopFusionConfig(analysis, GetElementShape(analysis));
}
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis, const Shape& element_shape) {
int unroll_factor = 1;
int64_t num_elements = ShapeUtil::ElementsIn(element_shape);
int64_t n_threads_max = analysis.device_info().threads_per_core_limit() *
analysis.device_info().core_count();
if (num_elements >= n_threads_max &&
!MayPreventVectorization(analysis.fusion())) {
unroll_factor = ComputeMaxUnrollFactor(num_elements);
}
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
unroll_factor = std::max(
unroll_factor,
CeilOfRatio(8, analysis.input_output_info().smallest_output_dtype_bits));
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
VLOG(2) << "Unroll factor: " << unroll_factor;
LaunchDimensionsConfig launch_config{unroll_factor};
return launch_config;
}
}
} | #include "xla/service/gpu/gpu_fusible.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using ::testing::ElementsAre;
using GpuFusibleTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_ElementwiseProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p0 = f32[2,2,2]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[2,2,2]{2,1,0} exponential(p0)
ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(exp->opcode(), HloOpcode::kExp);
EXPECT_FALSE(IsPhysicallyTransposing(*exp));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_MixedLayoutProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{1,3,2,0} compare(copy, broadcast), direction=GT
ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_MixedLayoutProducerWithTrivialDim) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
bitcast = f16[128,1,32,32]{1,3,2,0} bitcast(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1,32,32]{1,3,2,0} compare(bitcast, broadcast), direction=GT
ROOT root = f16[128,1,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1]{0}, f16[128,1,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_CopyProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
copy = f32[128,1024,32,32]{1,3,2,0} copy(p0)
ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* copy =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(copy->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*copy));
}
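// A transpose that permutes non-trivial dimensions under the default layout
// is a physical transposition.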
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_PhysicalTranspose) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[1024,128,32,32]{3,2,1,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
      transpose = f32[1024,128,32,32]{3,2,1,0} transpose(p0), dimensions={1,0,2,3}
      ROOT reduce_fusion = f32[1024]{0} fusion(transpose), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* transpose =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(transpose->opcode(), HloOpcode::kTranspose);
EXPECT_TRUE(IsPhysicallyTransposing(*transpose));
}
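// A loop fusion rooted at a layout-changing copy is physically transposing.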
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_LayoutChangingFusionProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
layout_changing_computation {
p0.1 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{3,2,1,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{3,2,1,0} compare(p1.1, broadcast), direction=GT
select = f16[128,1024,32,32]{3,2,1,0} select(greater-than, p0.1, broadcast)
ROOT root = f16[128,1024,32,32]{1,3,2,0} copy(select)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=layout_changing_computation
ROOT reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
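// Only parameters of maximal true rank are considered; the low-rank p1,
// whose extra dimensions are reshaped away, does not make this transposing.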
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_ConsiderMaximumTrueRanksParamsOnly) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
broadcasting_computation {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
reshape = f32[128]{0} reshape(p1.1)
broadcast = f32[128,1024,32,32]{1,3,2,0} broadcast(reshape), dimensions={0}
ROOT add = f32[128,1024,32,32]{1,3,2,0} add(p0.1, broadcast)
}
ENTRY entry {
p0 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
loop_fusion = f32[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=broadcasting_computation
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kAdd);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
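// TransposesMinorDimension should hold exactly when the physically minor-most
// dimension moves, for default and non-default layouts alike.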
TEST_F(GpuFusibleTest, TransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,40,30]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
no_transpose_minor_default = f32[10,20,40,30]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_major_default = f32[10,30,20,40]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
transpose_minor_non_default = f32[10,30,20,40]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,40,30]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,40,30]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, no_transpose_minor_default, transpose_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
}
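// Size-1 dimensions are skipped when deciding whether the minor-most
// dimension actually moves.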
TEST_F(GpuFusibleTest, TransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,1,1]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_nontrivial_minor_default = f32[10,1,20,1]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
no_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_one_major_default = f32[1,20,10,1]{3,2,1,0} transpose(default_layout), dimensions={2,1,0,3}
transpose_two_major_default = f32[20,10,1,1]{3,2,1,0} transpose(default_layout), dimensions={1,0,2,3}
transpose_minor_non_default = f32[10,1,20,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,1,1]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,1,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, transpose_nontrivial_minor_default, no_transpose_minor_default, transpose_one_major_default, transpose_two_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(6)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(7)));
}
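// Copies count as well: a copy whose output layout relocates the minor-most
// dimension transposes it.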
TEST_F(GpuFusibleTest, CopyTransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,30,40]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,30,40]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,30,40]{2,1,3,0} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,30,40]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
TEST_F(GpuFusibleTest, CopyTransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,1,1]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,1,1]{2,0,3,1} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,1,1]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
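// An unfused reduction to a vector is input-fusible as a reduction, but since
// it is not a fusion it is not itself a reduce input fusion.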
TEST_F(GpuFusibleTest, IsReduceInputFusion_ReductionToVector) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_ElementalReduction) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(1)
ROOT reduce = f32[512,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={3,0},
to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
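// A kInput fusion whose root reduces to contiguous dimensions qualifies as a
// reduce input fusion.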
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = f32[128,512]{1,0} fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
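// An elemental reduction over non-contiguous dimensions does not qualify,
// even as the root of a fusion.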
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT reduce = f32[8,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={1,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT fusion = f32[8,5,1,1]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
reduce.1 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
ROOT root = (f32[128,512]{1,0}, f32[128,512]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512]{1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputInputReduceFusionWithExtraOutputs) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
reduce.1 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
ROOT root = (f32[512,28]{1,0}, f32[512,28]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[512,28]{1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputLoopFusionReduceAndElementwiseOp) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
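// kCustom fusions are opaque to the fusion passes and cannot serve as
// multi-output fusion roots.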
TEST_F(GpuFusibleTest, CustomFusionIsNotFusibleAsConsumer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_fusion {
p = s32[20,3] parameter(0)
ROOT neg = s32[20,3] negate(p)
}
ENTRY e {
p = s32[20,3] parameter(0)
ROOT r = s32[20,3] fusion(p), kind=kCustom, calls=triton_fusion
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*root));
}
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0}
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p0.2)
ROOT add = f32[32,64]{1,0} add(neg, neg)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64]{1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()));
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(),
fusion_1->fused_expression_root()));
}
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionNotCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
bc = f32[1,64,32]{2,1,0} bitcast(neg)
transpose = f32[1,32,64]{2,1,0} transpose(bc), dimensions={0,2,1}
ROOT bc2 = f32[32,64]{1,0} bitcast(transpose)
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
broadcast = f32[32,64,4]{2,1,0} broadcast(p0.2), dimensions={0,1}
ROOT add = f32[32,64,4]{2,1,0} add(broadcast, broadcast)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64,4]{2,1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()->operand(0)));
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_2->fused_expression_root()->operand(0),
fusion_1->fused_expression_root()));
}
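// Sibling loop fusions with identical output shapes are trivially compatible
// for multi-output fusion.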
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_LoopFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
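// Shape compatibility ignores differences in floating-point precision, such
// as an f32 fusion paired with an f16 convert fusion.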
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_IgnoreFpPrecision) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
ROOT convert = f16[6400]{0} convert(p0.2)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_BitcastCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
bitcast = f32[1,6400]{1,0} bitcast(p0.2)
ROOT convert = f16[1,6400]{1,0} convert(bitcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[1,6400]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[1,6400]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Reduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(0)
reduce = f32[] reduce(p0, const.2), dimensions={0}, to_apply=scalar_add
ROOT root = (f32[6400]{0}, f32[]) tuple(fusion.1, reduce)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *reduce));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Elementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* div =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *div));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_MultiOutputLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(2);
EXPECT_NE(fusion_1, fusion_2);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_DifferentElementType) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}
add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
ROOT convert = s32[8,1,5,16,1,1]{5,4,3,2,1,0} convert(add)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = s32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, s32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(2);
EXPECT_NE(fusion_1, fusion_2);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_UnfusedOps) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentLayouts) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{0,1,2} parameter(1)
c0 = f32[] constant(0)
exp = f32[2,2,2]{2,1,0} exponential(p0)
reduce = f32[2,2]{0,1} reduce(p1, c0), dimensions={2}, to_apply=scalar_add
ROOT root = (f32[2,2]{0,1}, f32[2,2,2]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));
}
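// Transpose heroes with incompatible permutations ({0,2,1} vs. {2,1,0})
// block both hero compatibility and multi-output fusion.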
TEST_F(
GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsNotCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_021_transpose {
param_0 = f32[20,20,20]{2,1,0} parameter(0)
transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}
ROOT bitcast = f32[8000]{0} bitcast(transpose)
}
fused_220_transpose {
param_0 = f32[20,20,20]{2,1,0} parameter(0)
transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={2,1,0}
ROOT bitcast = f32[8000]{0} bitcast(transpose)
}
ENTRY reduce {
p0 = f32[20,20,20]{2,1,0} parameter(0)
fusion = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_021_transpose
fusion.1 = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_220_transpose
ROOT root = (f32[8000]{0}, f32[8000]{0}) tuple(fusion, fusion.1)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(
FusionHeroesAreCompatible(fusion_1->fused_expression_root()->operand(0),
fusion_2->fused_expression_root()->operand(0)));
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_1230_transpose {
param_0 = f32[1,20,20]{2,1,0} parameter(0)
bitcast.1 = f32[20,2,2,5]{3,2,1,0} bitcast(param_0)
transpose = f32[2,2,5,20]{3,2,1,0} transpose(bitcast.1), dimensions={1,2,3,0}
ROOT bitcast.2 = f32[400]{0} bitcast(transpose)
}
fused_021_transpose {
param_0 = f32[1,20,20]{2,1,0} parameter(0)
transpose = f32[1,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}
ROOT bitcast = f32[400]{0} bitcast(transpose)
}
ENTRY reduce {
p0 = f32[1,20,20]{2,1,0} parameter(0)
fusion = f32[400]{0} fusion(p0), kind=kInput, calls=fused_1230_transpose
fusion.1 = f32[400]{0} fusion(p0), kind=kInput, calls=fused_021_transpose
ROOT root = (f32[400]{0}, f32[400]{0}) tuple(fusion, fusion.1)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_MultiOutputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[2,2,2]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[2,2,2]{2,1,0} compare(f32[2,2,2]{2,1,0} p1.1, f32[2,2,2]{2,1,0} broadcast), direction=GT
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
ROOT select = f32[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f32[2,2,2]{2,1,0} p0.1, f32[2,2,2]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[2,2]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add
mul = f32[2,2,2]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add
ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
select = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce
gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(gte1, gte1, select)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1)->operand(0);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_ReduceFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce_1 {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} p0.1, f32[] c0), dimensions={0}, to_apply=scalar_add
}
fused_reduce_2 {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2)
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} mul, f32[] c1), dimensions={0}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
reduce_1 = f32[2,2]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1
reduce_2 = f32[2,2]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2
ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(reduce_1, reduce_2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce_1 {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} p0.1, f32[] c0),
dimensions={0}, to_apply=scalar_add
}
fused_reduce_2 {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,
f32[32,32,32]{2,1,0} p0.2)
c1 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} mul, f32[] c1),
dimensions={2}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
reduce_1 = f32[32,32]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1
reduce_2 = f32[32,32]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(reduce_1, reduce_2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_NoReductionToVector) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,
f32[32,32,32]{2,1,0} p0.2)
broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32,32]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
element_wise = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop,
calls=fused_element_wise
fusion = f32[32,32]{1,0} fusion(element_wise),
kind=kLoop, calls=fused_reduce
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0})
tuple(fusion, element_wise)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, IsFusibleAsMultiOutputFusionRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*root));
}
TEST_F(GpuFusibleTest, ScatterIsNotFusibleAsMultiOutputFusionRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY Scatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})")
.value();
const HloInstruction* scatter_inst =
module->entry_computation()->root_instruction();
EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*scatter_inst));
}
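// An elementwise producer feeding a reduce is the classic multi-output
// fusion candidate: fusible on both sides with compatible shapes.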
TEST_F(GpuFusibleTest, ProducerConsumerFusionElementwiseAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionTransposeAndLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,31,30]{2,1,0} parameter(0)
p1.1 = f32[32,31,30]{2,1,0} parameter(1)
neg = f32[32,31,30]{2,1,0} negate(p0.1)
ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)
}
ENTRY reduce {
p0 = f32[32,31,30]{2,1,0} parameter(0)
p1 = f32[32,30,31]{2,1,0} parameter(1)
transpose = f32[32,31,30]{2,1,0} transpose(p1), dimensions={0,2,1}
ROOT add = f32[32,31,30]{2,1,0} fusion(p0, transpose), kind=kLoop, calls=fused_add
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root;
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceAndLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,31,30]{2,1,0} parameter(0)
p1.1 = f32[32,31,30]{2,1,0} parameter(1)
neg = f32[32,31,30]{2,1,0} negate(p0.1)
ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)
}
ENTRY reduce {
p0 = f32[32,31,30]{2,1,0} parameter(0)
p1 = f32[32,31,30,29]{3,2,1,0} parameter(1)
c0 = f32[] constant(0.0)
reduce = f32[32,31,30]{2,1,0} reduce(p1, c0), dimensions={3}, to_apply=scalar_add
ROOT add = f32[32,31,30]{2,1,0} fusion(p0, reduce), kind=kLoop, calls=fused_add
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root;
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
ROOT root = ((f32[32,32]{1,0}, f32[32,32]{1,0}), f32[32,32,32]{2,1,0}) tuple(fusion, select)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2,
f32[2,2,2]{2,1,0} p0.2)
broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise
fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce
ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceUnfriendlyLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)
copy = f16[128,1024,33,33]{1,3,2,0} copy(p1.1)
slice = f16[128,1024,32,32]{1,3,2,0} slice(copy), slice={[0:128],[0:1024],[0:32],[0:32]}
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{1,3,2,0} compare(slice, broadcast), direction=GT
ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY reduce {
p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
EXPECT_FALSE(IsProducerMultiOutputFusible(*producer));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));
}
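// dynamic-update-slice operates in place, so it must not become a
// multi-output fusion producer, even though the shapes would be compatible.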
TEST_F(GpuFusibleTest, ProducerConsumerFusionInPlaceOperation) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
%fusion {
%param_0 = s32[4,4]{1,0} parameter(0)
%copy = s32[4,4]{0,1} copy(%param_0)
ROOT %transpose = s32[4,4]{1,0} transpose(%copy), dimensions={1,0}
}
ENTRY %main {
%param_0 = s32[4,4]{1,0} parameter(0)
%constant_0 = s32[] constant(0)
%constant_1 = s32[] constant(1)
%constant_1x1_1 = s32[1,1] constant({ {1} })
%updated = s32[4,4]{1,0} dynamic-update-slice(%param_0, %constant_1x1_1, %constant_1, %constant_0)
%transpose = s32[4,4]{0,1} fusion(%updated), kind=kLoop, calls=fusion
ROOT %tuple = tuple(%updated, %transpose)
})"))
.value();
const HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
const HloInstruction* dus = tuple->operand(0);
EXPECT_EQ(dus->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* transpose = tuple->operand(1);
EXPECT_EQ(transpose->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsProducerMultiOutputFusible(*dus));
EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*transpose));
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*dus, *transpose));
}
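// Constant producers are only fused when they are scalars feeding an
// existing fusion; neither constant here qualifies.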
TEST_F(GpuFusibleTest, NonscalarConstantsNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY BroadcastIntoReduce {
constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15})
broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0}
constant.1 = f32[] constant(0)
reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
to_apply=add
ROOT root = (f32[], f32[], f32[16,16,16,16], f32[16]) tuple(reduce, constant.1, broadcast, constant)
})")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* consumer = root->operand(0);
const HloInstruction* producer = root->operand(1);
const HloInstruction* consumer2 = root->operand(2);
const HloInstruction* producer2 = root->operand(3);
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer2, *consumer2)));
}
TEST_F(GpuFusibleTest, FuseLayoutChangingOpWithElementwise) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry {
p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy)
})")
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_TRUE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
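// A small, race-free reduction may fuse with its unary elementwise user.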
TEST_F(GpuFusibleTest, FuseReduceWithUnaryElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY main.12 {
Arg_0.1 = f32[2048]{0} parameter(0)
constant.4 = f32[] constant(0.0)
reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add
ROOT exp = f32[] exponential(reduce.10)
})"))
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_TRUE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
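// The same pattern over a much larger input would be code-generated with
// atomics, so fusing the elementwise user is disallowed.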
TEST_F(GpuFusibleTest, DoNotFuseReduceWithRacesWithUnaryElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY main.12 {
Arg_0.1 = f32[196608]{0} parameter(0)
constant.4 = f32[] constant(0.0)
reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add
ROOT exp = f32[] exponential(reduce.10)
})"))
.value();
const HloInstruction* consumer =
module->entry_computation()->root_instruction();
const HloInstruction* producer = consumer->operand(0);
EXPECT_FALSE(
static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer)));
}
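// Both reduce-windows overlap, so the consumer would re-read the expensive
// producer's elements many times if the two were fused.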
TEST_F(GpuFusibleTest, CreatesHeavyComputation_NonfusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[20,50] parameter(0)
constant_1 = f32[] constant(1)
reduce-window_1 = f32[21,41] reduce-window(p_0, constant_1),
window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add
constant_2 = f32[] constant(2)
reduce-window_2 = f32[21,41] reduce-window(p_0, constant_2),
window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add
ROOT root = (f32[21,41], f32[21,41])
tuple(reduce-window_1, reduce-window_2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_NonfusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[3,5] parameter(0)
constant = f32[] constant(1)
      broadcast = f32[3,5] broadcast(f32[] constant), dimensions={}
      scaled_p_0 = f32[3,5] multiply(f32[3,5] broadcast, f32[3,5]{1,0} p_0)
p_1 = f32[2,5] parameter(1)
reduce-window = f32[3,5] reduce-window(p_1, constant),
window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add
ROOT root = (f32[3,5], f32[3,5]) tuple(reduce-window, scaled_p_0)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest,
DoesNotCreateHeavyComputation_NonoverlappingReduceWindows) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p_0 = f32[2,5] parameter(0)
constant_1 = f32[] constant(1)
reduce-window_1 = f32[3,5] reduce-window(p_0, constant_1),
window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add
constant_2 = f32[] constant(2)
reduce-window_2 = f32[2,3] reduce-window(p_0, constant_2),
window={size=2x1 pad=0_2x0_0 stride=2x2}, to_apply=scalar_add
ROOT root = (f32[3,5], f32[2,3]) tuple(reduce-window_1, reduce-window_2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
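// The gather re-reads the reduce-window output multiple times and the
// reduce-window is expensive to recompute, so fusing them is heavy.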
TEST_F(GpuFusibleTest, CreatesHeavyComputation_ReduceWindowGather) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p0 = s32[512,512,2] parameter(0)
p1 = f32[1,1,512,512] parameter(1)
constant_1 = f32[] constant(0)
reduce-window.1 = reduce-window(p1, constant_1),
window={size=1x1x16x16 stride=1x1x16x16}, to_apply=scalar_add
ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3},
collapsed_slice_dims={}, start_index_map={1,2},
index_vector_dim=2, slice_sizes={1,1,1,1}
})"))
.value();
auto gather = module->entry_computation()->root_instruction();
auto reduce_window = gather->operand(0);
EXPECT_EQ(gather->opcode(), HloOpcode::kGather);
EXPECT_EQ(reduce_window->opcode(), HloOpcode::kReduceWindow);
EXPECT_FALSE(IfFusedReadsElementsMultipleTimes(*reduce_window));
EXPECT_TRUE(IsExpensiveToRepeat(*reduce_window));
EXPECT_TRUE(IfFusedReadsElementsMultipleTimes(*gather));
EXPECT_TRUE(CreatesHeavyComputation(*reduce_window, *gather));
}
TEST_F(GpuFusibleTest, CreatesHeavyComputation_FusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_producer {
operand = f32[20,20] parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[11,11] reduce-window(operand, constant),
window={size=20x20 pad=0_10x0_10}, to_apply=scalar_add
}
fused_consumer {
operand_0 = f32[11,11] parameter(0)
operand_1 = f32[11,11] parameter(1)
constant = f32[] constant(1)
reduce-window = f32[11,11] reduce-window(operand_1, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
ROOT scaled_operand_1 =
f32[11,11] multiply(f32[11,11] operand_0, f32[11,11] reduce-window)
}
ENTRY entry {
p0 = f32[20,20] parameter(0)
p1 = f32[11,11] parameter(1)
producer = f32[11,11] fusion(p0), kind=kLoop, calls=fused_producer
consumer = f32[11,11] fusion(p1, producer), kind=kLoop, calls=fused_consumer
ROOT root = (f32[11,11], f32[11,11]) tuple(producer, consumer)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));
}
TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_FusionInstr) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_producer {
p_0 = f32[2,2] parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[2,2] reduce-window(p_0, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
}
fused_consumer {
p_0 = f32[2,2] parameter(0)
p_1 = f32[2,2] parameter(1)
constant = f32[] constant(1)
reduce-window = f32[2,2] reduce-window(p_1, constant),
window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add
      ROOT scaled_p_1 = f32[2,2] multiply(f32[2,2] p_0, f32[2,2] reduce-window)
}
ENTRY entry {
p_0 = f32[2,2] parameter(0)
producer = f32[2,2] fusion(p_0), kind=kLoop, calls=fused_producer
consumer = f32[2,2] fusion(producer, p_0), kind=kLoop, calls=fused_consumer
ROOT root = (f32[2,2], f32[2,2]) tuple(producer, consumer)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
const HloInstruction* consumer = root->operand(1);
EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));
}
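// The transpose hero makes this an input fusion rather than a loop fusion.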
TEST_F(GpuFusibleTest, ChooseFusionKind) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY computation {
p = f32[1,5000,6000]{2,1,0} parameter(0)
c = f32[1,6000,5000]{2,1,0} transpose(p), dimensions={0,2,1}
ROOT r = f32[300,20,5000]{2,1,0} reshape(c)
}
)")
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* producer = root->operand(0);
EXPECT_EQ(ChooseFusionKind(*producer, *root),
HloInstruction::FusionKind::kInput);
}
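// GetFusionRoots resolves get-tuple-elements of a multi-output custom call
// back to the call itself, once per tuple output, in root order.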
TEST_F(GpuFusibleTest, GetFusionRoots1) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
ROOT tuple = (bf16[], s32[], s32[]) tuple(get-tuple-element.0, get-tuple-element.1, p0)
}
    ENTRY entry {
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call = fusion->root_instruction()->operand(0)->operand(0);
auto parameter = fusion->root_instruction()->operand(2);
std::vector<const HloInstruction*> expected_roots{custom_call, custom_call,
parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRoots2) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call.1 = bf16[] custom-call(p0), custom_call_target="my_custom_call1"
custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2"
ROOT tuple = (bf16[], bf16[], s32[]) tuple(custom-call.1, custom-call.2, p0)
}
    ENTRY entry {
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], bf16[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call1 = fusion->root_instruction()->operand(0);
auto custom_call2 = fusion->root_instruction()->operand(1);
auto parameter = fusion->root_instruction()->operand(2);
std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2,
parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRoots3) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2"
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
ROOT tuple = (bf16[], bf16[], s32[], s32[]) tuple(get-tuple-element.0, custom-call.2, get-tuple-element.1, p0)
}
    ENTRY entry {
p0 = s32[] parameter(0)
ROOT fusion = (bf16[], bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call1 = fusion->root_instruction()->operand(0)->operand(0);
auto custom_call2 = fusion->root_instruction()->operand(1);
auto parameter = fusion->root_instruction()->operand(3);
std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2,
custom_call1, parameter};
EXPECT_EQ(roots, expected_roots);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithGTEMakeTupleSequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
ROOT tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
}
ENTRY entry{
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto custom_call = fusion->root_instruction()->operand(0)->operand(0);
auto dus = fusion->root_instruction()->operand(1);
std::vector<const HloInstruction*> expected_result{custom_call, dus,
custom_call};
EXPECT_EQ(roots, expected_result);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithMakeTupleGTESequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
get-tuple-element.3 = bf16[] get-tuple-element(tuple), index=0
get-tuple-element.4 = u32[] get-tuple-element(tuple), index=2
ROOT tuple2 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.3, dynamic-update-slice, get-tuple-element.4)
}
ENTRY entry{
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto tuple_inst = fusion->root_instruction()->operand(0)->operand(0);
auto custom_call = tuple_inst->operand(0)->operand(0);
auto dus = fusion->root_instruction()->operand(1);
std::vector<const HloInstruction*> expected_result{custom_call, dus,
custom_call};
EXPECT_EQ(roots, expected_result);
}
TEST_F(GpuFusibleTest, GetFusionRootsWithTupleMultipleSameOperands) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p1 = s32[32] parameter(0)
add0 = s32[32] add(p1, p1)
ROOT _ = (s32[32], s32[32]) tuple(add0, add0)
}
ENTRY entry {
p0 = s32[32] parameter(0)
ROOT fusion = (s32[32], s32[32]) fusion(p0), kind=kCustom, calls=fusion
}
)")
.value();
auto called_computations =
module->entry_computation()->root_instruction()->called_computations();
ASSERT_EQ(called_computations.size(), 1);
auto fusion = called_computations.front();
auto roots = GetFusionRoots(*fusion);
auto add0 = fusion->root_instruction()->operand(0);
EXPECT_THAT(GetFusionRoots(*fusion), ElementsAre(add0, add0));
}
TEST_F(GpuFusibleTest, GetFusibleComputations) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0 = f32[128,1024] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[128]{0} reduce(p0, c0), dimensions={1}, to_apply=scalar_add
}
body_a {
p0 = f32[128,1024] parameter(0)
ROOT reduce_fusion = f32[128] fusion(p0), kind=kInput, calls=fused_reduce
}
body_b {
p0 = f32[128,1024] parameter(0)
c0 = f32[] constant(0)
ROOT bc = f32[128] broadcast(c0), dimensions={}
}
ENTRY main {
p0 = s32[] parameter(0)
p1 = f32[128,1024] parameter(1)
ROOT conditional = f32[128] conditional(p0, p1, p1),
branch_computations={body_a, body_b}
})"))
.value();
auto fusible = GetFusibleComputations(*module, {});
EXPECT_THAT(fusible, ElementsAre(module->GetComputationWithName("body_a"),
module->GetComputationWithName("body_b"),
module->entry_computation()));
}
TEST_F(GpuFusibleTest, GetSharedMemoryUsage) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
wrapped_transpose {
p0 = f32[128,1024,2]{2,1,0} parameter(0)
ROOT transpose = f32[1024,128,2]{2,1,0} transpose(p0), dimensions={1,0,2}
}
ENTRY main {
p = f32[128,1024,2] parameter(0)
ROOT res = f32[1024,128,2]{2,1,0} fusion(p), kind=kInput, calls=wrapped_transpose
})"))
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
FusionInfoCache cache;
auto fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(cache.GetSharedMemoryUsage(*fusion), 32 * 33 * 2 * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c987cf9e-49f9-4548-8911-f4a481f0a4b8 | cpp | tensorflow/tensorflow | buffer_allocations | third_party/xla/xla/service/gpu/buffer_allocations.cc | third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc | #include "xla/service/gpu/buffer_allocations.h"
#include <cstdint>
#include <set>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
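// Deallocates buffers that are no longer needed once execution finishes:
// preallocated temp buffers, and maybe-live-out buffers whose addresses are
// not in `live_addresses`. Returns the first deallocation error, if any.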
absl::Status BufferAllocations::TearDown(
const std::set<se::DeviceMemoryBase>& live_addresses,
absl::Span<const BufferAllocation> allocations) {
absl::Status status;
const int64_t num_buffers = allocations.size();
for (BufferAllocation::Index i = 0; i < num_buffers; ++i) {
const BufferAllocation& allocation = allocations[i];
se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index());
if ((allocation.maybe_live_out() &&
!live_addresses.count(buffer_address)) ||
allocation.IsPreallocatedTempBuffer()) {
auto dealloc_result =
memory_allocator_->Deallocate(device_ordinal_, buffer_address);
if (!dealloc_result.ok() && status.ok()) {
status = dealloc_result;
}
}
}
return status;
}
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
BufferAllocation::Index buffer_index) const {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
se::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress(
BufferAllocation::Index buffer_index) {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
const BufferAllocation::Slice& buffer_slice) const {
int64_t index = buffer_slice.index();
se::DeviceMemoryBase base = GetDeviceAddress(index);
int64_t offset = buffer_slice.offset();
CHECK_LE(buffer_slice.offset(), base.size())
<< "slice offset " << offset << " must be smaller than buffer #" << index
<< " size " << base.size();
int64_t extent = offset + buffer_slice.size();
CHECK_LE(extent, base.size())
<< "slice extent " << extent << " must be smaller than buffer #" << index
<< " size " << base.size();
return base.GetByteSlice(buffer_slice.offset(), buffer_slice.size());
}
}
} | #include "xla/backends/cpu/runtime/buffer_allocations.h"
#include <cstddef>
#include <vector>
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(BufferAllocationsTest, GetDeviceAddress) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc(0, size_in_bytes, 0);
BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),
sizeof(float));
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem,
allocations.GetDeviceAddress(0));
EXPECT_EQ(alloc_mem.opaque(), &data[0]);
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem,
allocations.GetDeviceAddress(slice));
EXPECT_EQ(slice_mem.opaque(), &data[2]);
}
TEST(BufferAllocationsTest, GetDeviceAddressUnchecked) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc(0, size_in_bytes, 0);
BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),
sizeof(float));
se::DeviceMemoryBase alloc_mem = allocations.GetDeviceAddressUnchecked(0);
EXPECT_EQ(alloc_mem.opaque(), &data[0]);
se::DeviceMemoryBase slice_mem = allocations.GetDeviceAddressUnchecked(slice);
EXPECT_EQ(slice_mem.opaque(), &data[2]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_allocations.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a2dfbbc-53b8-4988-9ab6-73c85b8aa912 | cpp | tensorflow/tensorflow | cudnn_support_utils | third_party/xla/xla/service/gpu/cudnn_support_utils.cc | third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc | #include "xla/service/gpu/cudnn_support_utils.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
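// Checks whether this convolution can use cuDNN's vectorized integer path
// with the given vector size (4 or 32): it must be a 2D forward or
// forward-activation convolution with integer inputs, no dilation, and a
// sufficiently recent compute capability (6.1 for int8x4, 7.5 for int8x32).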
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));
const Shape& input_shape = conv.operand(0)->shape();
const Shape& kernel_shape = conv.operand(1)->shape();
const Shape& result_shape = conv.shape().tuple_shapes(0);
const auto& dnums = conv.convolution_dimension_numbers();
if (vector_size != 4 && vector_size != 32) {
VLOG(3) << "Unsupported vector size for integer convolution: "
<< vector_size;
return false;
}
if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||
!compute_capability.IsAtLeast(6, 1)) {
VLOG(3) << "Compute capability " << compute_capability.ToString()
<< " is not sufficent for int8x" << vector_size
<< " vectorization.";
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
VLOG(3) << "Convolution kind is not forward or foward-activation: "
<< conv.ToString();
return false;
}
if (!primitive_util::IsIntegralType(input_shape.element_type()) ||
!primitive_util::IsIntegralType(kernel_shape.element_type())) {
VLOG(3) << "Convolution does not accept integer inputs/weights: "
<< conv.ToString();
return false;
}
if (dnums.input_spatial_dimensions().size() != 2 ||
dnums.kernel_spatial_dimensions().size() != 2 ||
dnums.output_spatial_dimensions().size() != 2) {
VLOG(3) << "Convolution is not 2D: " << conv.ToString();
return false;
}
if (vector_size == 32 &&
!primitive_util::IsIntegralType(result_shape.element_type())) {
VLOG(3) << "int8x32 convolutions only support integer output: "
<< conv.ToString();
return false;
}
if (vector_size == 32) {
int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);
int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);
int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);
int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);
const int64_t dilationW = conv.window().dimensions()[0].base_dilation();
const int64_t dilationH = conv.window().dimensions()[1].base_dilation();
if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {
VLOG(3) << "Conv spatial filter/input dimensions are too small for "
"vecotrized int8x32 convolution: "
<< conv.ToString();
return false;
}
}
if (window_util::HasDilation(conv.window())) {
VLOG(3) << "Vectorized integer convolutions do not support dilation: "
<< conv.ToString();
return false;
}
return true;
}
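// Infers the split shape, permutation, and result shape that rearrange a
// rank-4 (or already vectorized rank-5) filter into the [O, I', H, W, 32]
// layout used for cuDNN filter reordering, where I' is the input feature
// dimension divided down to groups of 32.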
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {
if (shape.rank() != 4 && shape.rank() != 5) {
return Internal("Filter shape has unexpected rank.");
}
const int64_t dO = dimension_numbers.kernel_output_feature_dimension();
const int64_t dI = dimension_numbers.kernel_input_feature_dimension();
const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);
const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);
bool revectorize = shape.rank() == 5;
const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;
const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;
if (shape.dimensions(dO) % 32 != 0 ||
shape.dimensions(dI) % (32 / vsize) != 0 ||
(revectorize && vsize != 4 && vsize != 32)) {
return Internal("Filter shape is not vectorizable.");
}
std::vector<int64_t> output = {
shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),
shape.dimensions(dH), shape.dimensions(dW), 32};
Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);
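  // Maps a logical filter dimension to its index within the rank-8 split
  // shape built below, accounting for the sub-dimensions that the O and I
  // splits insert before it.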
auto calc_index = [&](int dim) {
bool split_v = vsize == 32;
return (revectorize
? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)
: (dI < dim ? 3 : 0)) +
(dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);
};
int idx_O = calc_index(dO);
int idx_I = calc_index(dI);
int idx_H = calc_index(dH);
int idx_W = calc_index(dW);
int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;
int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2;
std::vector<int64_t> dims(8);
dims[idx_O] = shape.dimensions(dO) / 8;
dims[idx_O + 1] = 4;
dims[idx_O + 2] = 2;
dims[idx_I] = shape.dimensions(dI) / (32 / vsize);
dims[idx_Y] = 8;
dims[idx_Z] = 4;
dims[idx_H] = shape.dimensions(dH);
dims[idx_W] = shape.dimensions(dW);
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O,
idx_O + 2, idx_Y, idx_O + 1, idx_Z};
return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};
}
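// Same idea for the rank-1 bias: view it as [N/32, 4, 2, 4] and swap the two
// middle sub-dimensions.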
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape) {
if (shape.rank() != 1) {
return Internal("Bias shape has unexpected rank.");
}
if (shape.dimensions(0) % 32 != 0) {
return Internal("Bias shape is not vectorizable.");
}
std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4};
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {0, 2, 1, 3};
return CudnnReorderTransposeConfig{split_shape, shape, permutation};
}
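// Matches a root of the form tuple(result, custom-call()) where the custom
// call is the zero-operand workspace-allocation marker.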
bool IsWorkspaceAllocationRoot(const HloInstruction& root) {
return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&
root.operand_count() == 2 &&
root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&
root.operand(1)->operand_count() == 0;
}
}
} | #include "xla/service/gpu/cudnn_support_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class CudnnSupportUtilsTest : public HloTestBase {
public:
absl::StatusOr<HloCustomCallInstruction*> GetCustomCall(
xla::VerifiedHloModule* module, absl::string_view target) {
HloCustomCallInstruction* call = nullptr;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->IsCustomCall(target)) {
VLOG(1) << inst->ToString();
if (call != nullptr) {
return tsl::errors::FailedPrecondition(
"Found more than one custom call.");
}
call = Cast<HloCustomCallInstruction>(inst);
}
}
}
if (call == nullptr) {
return tsl::errors::FailedPrecondition(
"Did not find any matching custom call.");
}
return call;
}
};
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckKind) {
auto moduleFwd = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleBwdInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {
auto moduleS8InOut = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f32[32,10,10,64] parameter(0)
filter = f32[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,10,64] parameter(0)
filter = s8[2,2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b012f_012io->b012f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),
window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {
auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,2,2,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,3,3,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(moduleFilterAlmostCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {};
TEST_P(ReorderFilterRank4Test, InferTransposeRank4) {
auto input_dims = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[4] = {0, 0, 0, 0};
shape_dims[dI] = 224;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);
}
std::vector<std::string> GeneratePermutations(std::string input_dims) {
std::sort(input_dims.begin(), input_dims.end());
std::vector<std::string> permutations;
do {
permutations.push_back(input_dims);
} while (std::next_permutation(input_dims.begin(), input_dims.end()));
return permutations;
}
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test,
::testing::ValuesIn(GeneratePermutations("01io")));
class ReorderFilterRank5Test
: public ::testing::TestWithParam<std::tuple<std::string, int>> {};
TEST_P(ReorderFilterRank5Test, InferTransposeRank5) {
auto [input_dims, vsize] = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};
shape_dims[dI] = 224 / vsize;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
}
INSTANTIATE_TEST_SUITE_P(
ReorderTestSuite, ReorderFilterRank5Test,
::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")),
::testing::Values(4, 32)));
class ReorderBiasTest : public ::testing::Test {};
TEST_F(ReorderBiasTest, InferTranspose) {
Shape shape = ShapeUtil::MakeShape(U8, {96});
auto bias = HloInstruction::CreateParameter(2, shape, "bias");
Shape unused = ShapeUtil::MakeNil();
auto input = HloInstruction::CreateParameter(0, unused, "input");
auto filter = HloInstruction::CreateParameter(1, unused, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForBiasReordering(shape));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));
EXPECT_EQ(inferred_config.permutation[2], 1);
EXPECT_EQ(inferred_config.permutation[3], 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
351b5a0b-c4a1-46de-8a0c-dd47539046d9 | cpp | tensorflow/tensorflow | split_k_gemm_rewriter | third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc | third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc | #include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <cmath>
#include <cstdint>
#include <iterator>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
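// Returns true if the product of some suffix of `span` is divisible by
// `divisor`. Walks the dimensions minor-to-major and gives up as soon as the
// running product stops dividing `divisor` evenly.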
bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span,
const int64_t divisor) {
CHECK_GE(divisor, 1);
int64_t product = 1;
for (auto it = span.crbegin(); it != span.crend(); ++it) {
product *= *it;
if (product % divisor == 0) {
return true;
}
if (divisor % product != 0) {
return false;
}
}
return false;
}
namespace {
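// Copies `source` to `destination`, incrementing every element that is
// >= `threshold`; used to renumber dimension indices after inserting a new
// dimension.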
void CopyIncrementingAboveThreshold(
const tsl::protobuf::RepeatedField<int64_t>& source,
tsl::protobuf::RepeatedField<int64_t>& destination, const int threshold) {
destination.Reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.Add(x);
}
}
void CopyIncrementingAboveThreshold(absl::Span<const int64_t> source,
DimensionVector& destination,
const int threshold) {
destination.reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.push_back(x);
}
}
absl::Status UncompilableMatmul(absl::string_view explanation) {
absl::Status s = absl::CancelledError(explanation);
s.SetPayload(kUncompilableFusion, absl::Cord(explanation));
return s;
}
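// Bitcasts the sparsity metadata operand so that its minor-most dimension is
// split into [split_k, size / split_k].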
absl::StatusOr<HloInstruction*> MakeSparseMetaOperand(
HloDotInstruction& dot, const TritonGemmConfig& config) {
CHECK_EQ(dot.sparse_operands(), 1);
CHECK_EQ(dot.sparsity().front().index(), 0);
HloInstruction* meta = dot.mutable_operand(2);
const Shape& shape = meta->shape();
if (shape.dimensions().back() % config.split_k != 0) {
return UncompilableMatmul("Sparsity metadata has incorrect shape.");
}
std::vector<int64_t> dimensions(shape.dimensions().begin(),
shape.dimensions().end() - 1);
dimensions.push_back(config.split_k);
dimensions.push_back(shape.dimensions().back() / config.split_k);
Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout(
shape.element_type(), dimensions);
return MakeBitcastHlo(meta, new_shape);
}
}
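// Reshapes dot operand `operand_number` so that its contracting dimension K
// becomes [split_k, K / split_k], zero-padding K first if it is not evenly
// divisible. Returns an uncompilable-matmul error when the fusion does not
// support the split.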
absl::StatusOr<HloInstruction*> MakeSplitKOperand(
HloInstruction& dot, const TritonFusionAnalysis& analysis,
const TritonGemmConfig& config, const int64_t contracting_dim_idx,
const int operand_number) {
HloInstruction* operand = dot.mutable_operand(operand_number);
const int64_t k = operand->shape().dimensions(contracting_dim_idx);
const bool need_padding = k % config.split_k != 0;
TritonFusionAnalysis::Scope scope = (operand_number == 0)
? TritonFusionAnalysis::Scope::LHS
: TritonFusionAnalysis::Scope::RHS;
auto check_if_supported = [&](const HloInstruction& hlo,
bool check_divisibility) {
const TensorIterationSpec::DimIterationSpec* spec =
analysis.IterSpec(scope, &hlo, contracting_dim_idx);
if (spec == nullptr) {
return absl::OkStatus();
}
if (spec->size() != 1) {
return UncompilableMatmul("Unsupported case.");
}
const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0);
if (fragment.is_sliced()) {
return UncompilableMatmul(
"Sliced contracting dimension is not supported yet.");
}
if (check_divisibility && !HasDivisibleSuffixAllowingSplit(
fragment.subfragments, config.split_k)) {
return UncompilableMatmul("Contracting dimension is too fragmented.");
}
if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) {
return UncompilableMatmul(
"Too small divisible part of the contracting dimension.");
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
check_if_supported(*operand, !need_padding));
for (const HloInstruction* param : analysis.ScopeParameters(scope)) {
TF_RETURN_IF_ERROR(
check_if_supported(*param, !need_padding));
}
if (need_padding) {
HloInstruction* const zero =
dot.parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(operand->shape().element_type())));
PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank());
padding_config.mutable_dimensions(contracting_dim_idx)
->set_edge_padding_high(config.split_k - k % config.split_k);
TF_ASSIGN_OR_RETURN(HloInstruction * pad,
MakePadHlo(operand, zero, padding_config));
*pad->mutable_shape()->mutable_layout() = operand->shape().layout();
operand = pad;
}
CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k);
const Shape& shape = operand->shape();
Shape new_shape(shape.element_type(), {}, {}, {});
for (int i = 0; i < shape.rank(); ++i) {
const int64_t dimension_size = shape.dimensions(i);
if (i == contracting_dim_idx) {
new_shape.add_dimensions(config.split_k);
new_shape.add_dimensions(dimension_size / config.split_k);
} else {
new_shape.add_dimensions(dimension_size);
}
}
Layout* new_layout = new_shape.mutable_layout();
for (int64_t logical_dim_idx : shape.layout().minor_to_major()) {
if (logical_dim_idx >= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx + 1);
}
if (logical_dim_idx <= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx);
}
}
return MakeBitcastHlo(operand, new_shape);
}
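// Rewrites the fused dot and its single-user chain down to the root: the
// contracting dimension becomes a new leading batch dimension of size
// split_k, and operands joining the chain from outside are broadcast along
// it. When reduced-precision reduction is disabled, the root is widened to
// an F32/F64 accumulator type.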
absl::Status MakeDotComputationSplitKBatch(
HloComputation* computation, const TritonGemmConfig& config,
bool disable_reduced_precision_reduction) {
HloDotInstruction* dot = Cast<HloDotInstruction>(
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot));
TF_ASSIGN_OR_RETURN(const auto analysis,
TritonFusionAnalysis::Execute(*computation));
const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers();
DotDimensionNumbers new_dim_numbers;
TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx,
ContractingDimensionIndex(*dot, 0));
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_contracting_dimensions(),
*new_dim_numbers.mutable_lhs_contracting_dimensions(),
lhs_contracting_idx);
new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_batch_dimensions(),
*new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx);
TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx,
ContractingDimensionIndex(*dot, 1));
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_contracting_dimensions(),
*new_dim_numbers.mutable_rhs_contracting_dimensions(),
rhs_contracting_idx);
new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_batch_dimensions(),
*new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx);
if (dot->sparse_operands()) {
if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) {
return UncompilableMatmul("Sparsity is only supported on left operand.");
}
}
std::stack<HloInstruction*> to_process;
absl::flat_hash_set<HloInstruction*> to_process_set;
HloInstruction* current = dot;
do {
to_process.push(current);
CHECK(to_process_set.insert(current).second);
if (current->users().empty()) {
break;
}
CHECK_EQ(current->user_count(), 1);
current = current->users()[0];
if (!legacy_triton::IsDistributiveOverAddition(*current)) {
return Cancelled("Operation non-distributive over addition after dot.");
}
} while (true);
bool did_pad = false;
while (!to_process.empty()) {
HloInstruction* current = to_process.top();
to_process.pop();
HloInstruction* expanded;
if (current == dot) {
TF_ASSIGN_OR_RETURN(
HloInstruction * lhs,
MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * rhs,
MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1));
if (lhs->operand(0)->opcode() == HloOpcode::kPad) {
CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad);
did_pad = true;
}
std::vector<SparsityDescriptor> sparsity(dot->sparsity().begin(),
dot->sparsity().end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
sparsity[i].set_dimension(sparsity[i].dimension() + 1);
TF_ASSIGN_OR_RETURN(sparse_meta[i],
MakeSparseMetaOperand(*dot, config));
}
expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(),
dot->shape().element_type(), sparsity, sparse_meta)
.value();
expanded->mutable_shape()->mutable_layout()->clear_minor_to_major();
CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(),
*expanded->mutable_shape()
->mutable_layout()
->mutable_minor_to_major(),
0);
expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0);
dot->SetupDerivedInstruction(expanded);
} else {
expanded = computation->AddInstruction(current->CloneWithNewShape(
ShapeUtil::PrependMajorDimension(config.split_k, current->shape())));
if (expanded->opcode() == HloOpcode::kTranspose) {
const auto* old_transpose = Cast<HloTransposeInstruction>(current);
auto* new_transpose = Cast<HloTransposeInstruction>(expanded);
new_transpose->mutable_dimensions()->clear();
new_transpose->mutable_dimensions()->reserve(
new_transpose->shape().rank());
new_transpose->mutable_dimensions()->push_back(0);
for (const int64_t dim : old_transpose->dimensions()) {
new_transpose->mutable_dimensions()->push_back(dim + 1);
}
}
}
TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(current));
if (current == dot) {
continue;
}
for (int i = 0; i < expanded->operands().size(); ++i) {
HloInstruction* operand = expanded->mutable_operand(i);
if (!to_process_set.contains(operand)) {
std::vector<int64_t> broadcast_dimensions(operand->shape().rank());
absl::c_iota(broadcast_dimensions, 1);
TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape(
i, MakeBroadcastHlo(operand, broadcast_dimensions,
ShapeUtil::PrependMajorDimension(
config.split_k, operand->shape()))));
}
}
}
if (disable_reduced_precision_reduction) {
PrimitiveType output_type =
computation->root_instruction()->shape().element_type();
PrimitiveType accumulator_type = output_type == PrimitiveType::F64
? PrimitiveType::F64
: PrimitiveType::F32;
computation->root_instruction()->mutable_shape()->set_element_type(
accumulator_type);
}
if (did_pad) {
TF_RETURN_IF_ERROR(
TritonFusionAnalysis::Execute(*computation, config.split_k).status());
}
return absl::OkStatus();
}
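// Applies the split-K rewrite to `dot_fusion` and adds a reduction over the
// new leading dimension after the fusion, converting back to the original
// element type if the accumulator was widened.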
absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,
const TritonGemmConfig& config) {
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (dot_fusion->shape().IsTuple()) {
return Unimplemented("Tuple output is not supported with split-K yet.");
}
const bool disable_reduced_precision_reduction =
dot_fusion->GetModule()
->config()
.debug_options()
.xla_gpu_triton_gemm_disable_reduced_precision_reduction();
const PrimitiveType output_type = dot_fusion->shape().element_type();
const Layout output_layout = dot_fusion->shape().layout();
TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch(
dot_fusion->fused_instructions_computation(), config,
disable_reduced_precision_reduction));
const HloInstruction* root = dot_fusion->fused_expression_root();
*dot_fusion->mutable_shape() = root->shape();
HloInstruction* zero =
dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(root->shape().element_type())));
TF_ASSIGN_OR_RETURN(HloInstruction * reduce,
MakeReduceHlo(dot_fusion, zero, {0},
HloOpcode::kAdd, &dot_fusion->metadata()));
*reduce->mutable_shape()->mutable_layout() = output_layout;
if (dot_fusion->IsRoot()) {
dot_fusion->parent()->set_root_instruction(reduce,
true);
} else {
TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce));
}
if (disable_reduced_precision_reduction) {
HloInstruction* convert = MakeConvertToHlo(reduce, output_type);
if (reduce->IsRoot()) {
reduce->parent()->set_root_instruction(convert,
true);
} else {
TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert));
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
namespace m = ::xla::match;
TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) {
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2));
}
using SplitKTest = HloTestBase;
TEST_F(SplitKTest, MakeSplitK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm",
metadata={op_name="foo"}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
EXPECT_EQ(root->metadata().op_name(), "foo");
}
TEST_F(SplitKTest, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = f32[480,16]{1,0} convert(d)
ROOT s = f32[480,16]{1,0} tanh(c)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = f32[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Operation non-distributive over addition after dot.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) {
constexpr absl::string_view kHloText = R"(
t {
c1 = s32[] constant(1)
bc1 = s32[31]{0} broadcast(c1), dimensions={}
p0 = s32[31]{0} parameter(0)
cmp = pred[31]{0} compare(bc1, p0), direction=EQ
cvt = f32[31]{0} convert(cmp)
bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1}
c0 = f32[] constant(0)
bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={}
ROOT dot = f32[31,16]{1,0} dot(bc2, bc0),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s32[31]{0} parameter(0)
ROOT r = f32[31,16]{1,0} fusion(p0),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) {
const std::string hlo_text = R"(
t {
p0 = f16[32,1234] parameter(0)
s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]}
p1 = f16[256,768] parameter(1)
ROOT d = f16[32,768] dot(s0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[32,1234] parameter(0)
p1 = f16[256,768] parameter(1)
ROOT r = f16[32,768] fusion(p0, p1),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Sliced contracting dimension is not supported yet.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) {
const std::string kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(),
Layout({0, 1}));
}
TEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) {
const std::string hlo_text = R"(
HloModule m
triton_gemm_dot.24 {
parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1)
convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3)
parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0)
bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0)
ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2),
lhs_batch_dims={1}, lhs_contracting_dims={0},
rhs_batch_dims={0}, rhs_contracting_dims={2}
}
ENTRY e {
tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0)
tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0),
kind=kCustom, calls=triton_gemm_dot.24,
backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 64, 64, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, SupportsIndivisible) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,129]{1,0} reshape(copy.1)
convert.8 = bf16[480,129]{1,0} convert(reshape.5)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{1,0} parameter(0)
convert_0 = bf16[480,129]{1,0} convert(parameter_0)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{0,1} parameter(0)
convert_0 = bf16[480,129]{0,1} convert(parameter_0)
parameter_1 = bf16[16,129]{0,1} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{0,1} parameter(0)
p1 = bf16[16,129]{0,1} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), kConfig));
TF_EXPECT_OK(HloVerifier(true,
true,
LayoutAssignment::InstructionCanChangeLayout)
.Run(module.get())
.status());
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithTranspose) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0}
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithBroadcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[] parameter(0)
convert_0 = bf16[] convert(parameter_0)
broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithBitcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0)
bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SkipSmallK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,64]{1,0} reshape(copy.1)
convert.8 = bf16[480,64]{1,0} convert(reshape.5)
parameter_1 = bf16[16,64]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,64]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 128, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
"Too small divisible part of the contracting dimension."));
}
TEST_F(SplitKTest, FragmentedKSupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[7,2,16,4,20] parameter(0)
t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0}
b0 = f16[2560,7] bitcast(t0)
a1 = f16[2560,5] parameter(1)
ROOT r = f16[7,5] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[7,2,16,4,20] parameter(0)
p1 = f16[2560,5] parameter(1)
ROOT fusion = f16[7,5] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 32, 16, 1, 1, 4);
config.split_k = 5;
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
config.split_k = 8;
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation, config.split_k));
EXPECT_EQ(dot_computation->root_instruction()->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5}));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 2560, 0,
2560,
ElementsAre(20, 4, 4, 4, 2))));
}
TEST_F(SplitKTest, FragmentedKUnsupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f32[3,128,77] parameter(0)
b0 = f32[384,77] bitcast(p0)
a1 = f32[384,25] parameter(1)
ROOT r = f32[77,25] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[3,128,77] parameter(0)
p1 = f32[384,25] parameter(1)
ROOT fusion = f32[77,25] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
}
TEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) {
const std::string kHloText = R"(
triton_gemm_dot.4842_computation {
parameter_0 = bf16[96,96]{1,0} parameter(0)
parameter_1 = bf16[96,7]{1,0} parameter(1)
dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bitcast.2 = bf16[7,3,32]{2,1,0} bitcast(dot.0)
}
ENTRY e {
parameter_0.91 = bf16[96,96]{1,0} parameter(0)
parameter_1.86 = bf16[96,7]{1,0} parameter(1)
ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0}
fusion(parameter_0.91, parameter_1.86), kind=kCustom,
calls=triton_gemm_dot.4842_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
}
TEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=L.2@2:4
}
ENTRY e {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
HloInstruction* dot =
module->GetComputationWithName("triton_gemm")->root_instruction();
EXPECT_EQ(dot->operand(0)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400}));
EXPECT_EQ(dot->operand(1)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10}));
EXPECT_EQ(dot->operand(2)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50}));
}
TEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=R.1@2:4
}
ENTRY e {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
auto result = MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config);
EXPECT_FALSE(result.ok());
}
class SplitKTestWithMorePreciseReduction
: public HloTestBase,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
TEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) {
const std::string hlo_text = R"(
triton_gemm_dot {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
d = f16[8,288,32]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2}
}
ENTRY e {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 32, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const auto* transpose =
Cast<HloTransposeInstruction>(module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation()
->root_instruction());
EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3));
}
TEST_F(SplitKTest, MakeSplitKWithTrivialDimension) {
const std::string hlo_text = R"(
triton_gemm_dot {
parameter_0 = f32[1001,1]{1,0} parameter(0)
parameter_1 = f32[1001,2048]{1,0} parameter(1)
ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY %entry_computation {
p0 = f32[1001,1]{1,0} parameter(0)
p1 = f32[1001,2048]{1,0} parameter(1)
ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 64, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reduce(m::Fusion(), m::Constant())));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c586ff2-998f-46bb-8cd1-365898f411cf | cpp | tensorflow/tensorflow | triton_fusion_analysis | third_party/xla/xla/service/gpu/triton_fusion_analysis.cc | third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotRequirements;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirements;
using triton_fusion::kNoSplitRequirement;
using triton_fusion::TransformDirection;
}
namespace triton_fusion {
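// Builds a fusion context for one dot operand. With split-K the dimension
// immediately preceding the contracting one is treated as the split-K batch
// dimension; only the LHS non-contracting dimension can be splittable, and
// only if the dot has no other batch dimensions.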
absl::StatusOr<FusionContext> FusionContext::FromDotOperand(
const HloInstruction& dot, const int operand_number, const int split_k) {
const int num_split_k_batch_dims = split_k > 1;
int split_k_dimension_index = kNoDimensionIndex;
TF_ASSIGN_OR_RETURN(int contracting_dimension_index,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,
NonContractingDimensionIndex(dot, operand_number));
if (split_k > 1) {
split_k_dimension_index = contracting_dimension_index - 1;
}
int splittable_dimension_index = kNoDimensionIndex;
if (operand_number == 0 &&
dot.dot_dimension_numbers().lhs_batch_dimensions_size() -
num_split_k_batch_dims ==
0) {
splittable_dimension_index = non_contracting_dimension_index;
}
FusionContext context(DotProperties{non_contracting_dimension_index,
splittable_dimension_index},
DotRequirements(kNoSplitRequirement));
context.dim_orders_[dot.operand(operand_number)] =
DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),
split_k_dimension_index);
return context;
}
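// Builds a fusion context for the dot output. If the operands carried a
// splittable-dimension requirement, output dimension 0 (or 1 when split-K
// prepends a batch dimension) remains the splittable one.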
FusionContext FusionContext::FromDotOutput(
const HloInstruction& dot, const int split_k,
DotRequirements requirements) {
int splittable_dimension_index = kNoDimensionIndex;
if (requirements.splittable_dimension_major_part_size > 1) {
splittable_dimension_index = (split_k > 1) ? 1 : 0;
}
FusionContext context(DotProperties{-1,
splittable_dimension_index},
std::move(requirements));
context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);
return context;
}
namespace {
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape())) {
return 0;
}
return hlo.operand_count() - 1;
}
}
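// Merges newly propagated dimension orders and requirements into the context.
// Fails if an instruction already has a physically different dimension order
// or if the requirements conflict.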
bool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {
for (const auto& [key, value] : update.dim_orders) {
auto it = dim_orders_.find(key);
if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {
return false;
}
}
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements_, update.requirements);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return false;
}
requirements_ = std::move(std::get<DotRequirements>(requirements_or_error));
dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());
return true;
}
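// Propagates dimension orders in a BFS from `origin` towards the fusion
// parameters, recording an iteration spec for every visited instruction.
// A parameter reached twice is rejected: it would be read differently by
// different users.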
absl::Status FusionContext::PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs) {
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> to_process;
visited.insert(&origin);
to_process.push(&origin);
while (!to_process.empty()) {
const HloInstruction* hlo = to_process.front();
to_process.pop();
if (hlo->opcode() == HloOpcode::kParameter) {
if (!parameters.insert(hlo).second) {
return FailedPrecondition(
"A parameter is read differently by different users. hlo: %s",
hlo->ToString());
}
VLOG(5) << hlo->ToString();
}
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,
properties_);
if (!std::holds_alternative<DimOrdersAndReqs>(result)) {
return FailedPrecondition(
"Can not propagate dim orders and requirements.");
}
if (!CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result))) {
return FailedPrecondition("Can not combine dim orders and requirements.");
}
iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();
for (const HloInstruction* operand : hlo->operands()) {
if (!visited.insert(operand).second) {
continue;
}
if (operand->opcode() == HloOpcode::kDot) {
continue;
}
to_process.push(operand);
}
}
return absl::OkStatus();
}
}
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloComputation& computation, const int split_k) {
VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());
TritonFusionAnalysis analysis;
const HloInstruction* dot =
hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);
TF_RET_CHECK(dot != nullptr);
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));
return analysis;
}
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloDotInstruction& dot, int split_k) {
TritonFusionAnalysis analysis;
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(dot, split_k));
return analysis;
}
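// Runs the analysis on a producer/consumer pair by extracting both into a
// scratch module, fusing the producer into the consumer there, and analyzing
// the resulting fusion computation.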
absl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(
const HloInstruction& producer, const HloInstruction& consumer,
int split_k) {
std::unique_ptr<HloModule> new_module =
ExtractProducerConsumerIntoNewModule(producer, consumer);
auto* new_producer =
new_module->entry_computation()->GetInstructionWithName(producer.name());
auto* new_consumer =
new_module->entry_computation()->GetInstructionWithName(consumer.name());
std::unique_ptr<HloInstruction> fusion_instruction_holder;
HloInstruction* fusion_instruction;
if (new_consumer->opcode() == HloOpcode::kFusion) {
fusion_instruction = new_consumer;
} else {
fusion_instruction_holder = HloInstruction::CreateFusion(
new_consumer->shape(), new_producer->fusion_kind(), new_consumer);
fusion_instruction = fusion_instruction_holder.get();
}
if (new_producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(new_producer);
} else {
fusion_instruction->FuseInstruction(new_producer);
}
auto* fused_computation =
fusion_instruction->fused_instructions_computation();
return Execute(*fused_computation, split_k).status();
}
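// Returns false iff some int4 parameter of the given dot scope has the
// (single) batch dimension as its minor-most dimension (stride 1).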
bool TritonFusionAnalysis::IsBatchDimMinorForInt4Parameter(
const HloInstruction& dot, Scope scope) const {
CHECK(scope == Scope::LHS || scope == Scope::RHS);
const auto& dims = dot.dot_dimension_numbers();
const auto& batch_dims = (scope == Scope::LHS) ? dims.lhs_batch_dimensions()
: dims.rhs_batch_dimensions();
if (batch_dims.empty()) return true;
int32_t batch_dim = batch_dims.Get(0);
CHECK_EQ(batch_dims.size(), 1);
const auto& params = parameters_.at(scope);
for (const auto& param : params) {
if (param->shape().element_type() != S4) continue;
const auto* strides = IterSpec(scope, param, batch_dim);
if (strides == nullptr) continue;
if (strides->front().stride == 1) return false;
}
return true;
}
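// Analyzes the LHS, RHS and (for sparse dots) META operand scopes, then walks
// the single-user chain from the dot to the fusion root while propagating the
// tiling into the OUTPUT scope.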
absl::Status TritonFusionAnalysis::ExecuteForDotFusion(
const HloInstruction& dot, const int split_k) {
DotRequirements lhs_requirements(kNoSplitRequirement);
for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {
const int operand_number = static_cast<int>(scope);
if (dot.operand_count() < operand_number + 1) {
continue;
}
TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(
dot, operand_number, split_k));
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));
if (scope == Scope::LHS) {
lhs_requirements = context.requirements();
}
}
auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);
  const HloInstruction* output = &dot;
while (!output->IsRoot()) {
TF_RET_CHECK(output->user_count() == 1);
const HloInstruction* input = output;
if (IsWorkspaceAllocationRoot(*output->users()[0])) {
break;
}
output = output->users()[0];
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*output, context.dim_orders().at(input),
TransformDirection::kInputToOutput, context.dot_properties());
if (std::holds_alternative<FusionDecision>(result)) {
auto decision = std::get<FusionDecision>(result);
return FailedPrecondition("Failed to propagate tiling with error: %s",
decision.Explain());
}
TF_RET_CHECK(
context.CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result)));
}
TF_RET_CHECK(
iter_specs_[Scope::OUTPUT]
.insert(
{output, context.dim_orders().at(output).ToTensorIterationSpec()})
.second);
parameters_[Scope::OUTPUT] = {};
if (output != &dot) {
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));
}
return absl::OkStatus();
}
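// Returns the scope (LHS, RHS or OUTPUT) an instruction was assigned to, or
// nullopt if the instruction is unknown to the analysis.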
std::optional<TritonFusionAnalysis::Scope>
TritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {
for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {
if (iter_specs_.at(scope).count(&hlo) > 0) {
return scope;
}
}
LOG(WARNING) << "No scope for hlo: " << hlo.ToString();
return std::nullopt;
}
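// Returns the iteration spec of `hlo` for the given logical dimension, or
// nullptr if none is recorded in that scope.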
const TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(
const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,
const int dimension) const {
auto hlo_spec = iter_specs_.at(scope).find(hlo);
if (hlo_spec != iter_specs_.at(scope).cend()) {
return hlo_spec->second.Find(dimension);
}
return nullptr;
}
namespace {
std::string IterationSpecByInstructionMapToString(
const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {
return absl::StrCat("IterSpec{",
absl::StrJoin(m, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(s, kv.first->name(), ": ",
kv.second.ToString());
}),
"}");
}
std::string ScopeToString(TritonFusionAnalysis::Scope s) {
switch (s) {
case TritonFusionAnalysis::Scope::LHS:
return "LHS";
case TritonFusionAnalysis::Scope::RHS:
return "RHS";
case TritonFusionAnalysis::Scope::META:
return "META";
case TritonFusionAnalysis::Scope::OUTPUT:
return "OUTPUT";
}
}
}
std::string TritonFusionAnalysis::ToString() const {
return absl::StrCat(
"TritonFusionAnalysis{\n",
absl::StrJoin(iter_specs_, ",\n",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, ScopeToString(kv.first), ": ",
IterationSpecByInstructionMapToString(kv.second));
}),
"\n}");
}
}
} | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using TritonDotAnalysisTest = HloTestBase;
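// The FieldsAre(...) expectations below follow
// TensorIterationSpec::IterationSpecFragment field order:
// (stride, count, slice_start, sliced_count, subfragments).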
TEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_dot {
p0 = f32[8,8] parameter(0)
ROOT dot = f32[8,8] dot(p0, p0),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[8,8] parameter(0)
ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot
})"));
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*module->entry_computation()
->root_instruction()
->called_computations()[0]));
EXPECT_TRUE(
analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());
}
TEST_F(TritonDotAnalysisTest, NopBitcasts) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[48,4]{1,0} parameter(0)
bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1)
bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18)
convert.4 = bf16[48,4]{1,0} convert(bitcast.19)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[48,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 48, 0,
48, ElementsAre(48))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3, 0,
3, ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {
const std::string hlo_text = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(115, 137, 0,
137, ElementsAre(137))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(115, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
}
TEST_F(TritonDotAnalysisTest, Merge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0)
bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,8,6,4]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 6 * 8,
0, 6 * 8,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, Split) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
%parameter_1 = f32[24000,2]{1,0} parameter(1)
%convert.15 = f16[24000,2]{1,0} convert(%parameter_1)
%parameter_0 = f16[4]{0} parameter(0)
%bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0)
ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[4]{0} parameter(0)
p1 = f32[24000,2]{1,0} parameter(1)
ROOT r = f16[24000,2]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p0);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),
ElementsAre(FieldsAre(2, 24000,
0, 24000,
ElementsAre(24000))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),
ElementsAre(FieldsAre(2, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
TEST_F(TritonDotAnalysisTest, TransposeMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = s8[48,4]{1,0} bitcast(transpose.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, CopyMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1)
copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99)
bitcast.18 = s8[48,4]{1,0} bitcast(copy.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = bf16[24,4]{1,0} bitcast(transpose.3)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[24,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton", called_computations={triton_dot}
ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8,
0, 8,
ElementsAre(8)),
FieldsAre(4 * 8, 3,
0, 3,
ElementsAre(3))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeOutput) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc = bf16[12,2,3]{2,1,0} bitcast(dot)
ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* dot_output = dot_computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(2, 12))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
p2 = f16[3,24]{1,0} parameter(2)
p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0}
p2tc = bf16[24,3]{1,0} convert(p2t)
ROOT r = bf16[24,3]{1,0} divide(p2tc, dot)
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
p2 = f16[3,24]{1,0} parameter(2)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* output_param =
dot_computation->parameter_instruction(2);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(24))));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
p1b = bf16[4,3] broadcast(p1)
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* scalar = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),
nullptr);
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),
nullptr);
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
p1b = bf16[4,3] broadcast(p1), dimensions={0}
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* vector = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),
1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
}
TEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
ENTRY e {
p0 = f16[2,35] parameter(0)
p0c = bf16[2,35] convert(p0)
p1 = bf16[35,2] parameter(1)
dot = bf16[2,2] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kBroadcast);
}
TEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,
dot_computation->root_instruction(), 0),
ElementsAre(FieldsAre(1, 8 * 21,
0, 8 * 21,
ElementsAre(21, 8)),
FieldsAre(8 * 21 * 58, 30,
0, 30,
ElementsAre(30))));
}
TEST_F(TritonDotAnalysisTest,
HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
a = f32[3,3]{1,0} parameter(0)
constant = f32[1,1]{1,0} constant({ {0} })
broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1}
reshape = f32[] reshape(broadcast)
broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={}
ROOT dot = f32[3,3]{1,0} dot(a, broadcast2),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
absl::StatusOr<TritonFusionAnalysis> analysis =
TritonFusionAnalysis::Execute(*dot_computation);
(void)analysis;
}
TEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input,
start_index0, start_index1),
dynamic_slice_sizes={64,2}
ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input,
start_index0, start_index1),
kind=kCustom,
calls=triton_gemm,
backend_config={"kind":"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(18, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 18,
0, 18,
ElementsAre(18))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(2, 96,
0, 96,
ElementsAre(96))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
TEST_F(TritonDotAnalysisTest, SparseDot) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
lhs = bf16[5,16] parameter(0)
rhs = bf16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
ENTRY main {
lhs = bf16[5,16] parameter(0)
rhs = bf16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT out = f32[5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config={kind:"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::META,
dot_computation->parameter_instruction(2), 0),
::testing::SizeIs(1));
}
TEST_F(TritonDotAnalysisTest, QueryScopeAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
for (const auto& hlo : dot_computation->instructions()) {
if (hlo->opcode() != HloOpcode::kDot) {
EXPECT_TRUE(analysis.QueryInstructionScope(*hlo).has_value());
}
}
}
TEST_F(TritonDotAnalysisTest, PadWithTrivialDimension) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_gemm_dot {
parameter_0 = f32[1001,1]{1,0} parameter(0)
constant = f32[] constant(0)
pad = f32[1004,1]{1,0} pad(parameter_0, constant), padding=0_3x0_0
bitcast = f32[4,251,1]{2,1,0} bitcast(pad)
parameter_1 = f32[4,251,2048]{2,1,0} parameter(1)
ROOT dot = f32[4,1,2048]{2,1,0} dot(bitcast, parameter_1),
lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0},
rhs_contracting_dims={1}
})"));
const HloComputation* dot_computation = *module->computations().begin();
TF_ASSERT_OK_AND_ASSIGN(
TritonFusionAnalysis analysis,
      TritonFusionAnalysis::Execute(*dot_computation, /*split_k=*/4));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 1001, 0,
1001, ElementsAre(1001))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 2),
ElementsAre(FieldsAre(1, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(2048, 1004, 0,
1004, ElementsAre(251, 4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 2),
ElementsAre(FieldsAre(1, 2048, 0,
2048, ElementsAre(2048))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
873af691-a7ed-4983-a8cd-f970eb8fa7af | cpp | tensorflow/tensorflow | gpu_latency_hiding_scheduler | third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc | third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc | #include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include <cstdint>
#include <tuple>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
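// All-reduces larger than this many bytes are modeled by the latency
// estimator as kCostlyAllReduceMultiplier times more expensive.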
static constexpr int64_t kCostlyAllReduceThreshold = 30 * 1024 * 1024;
static constexpr int64_t kCostlyAllReduceMultiplier = 4;
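// Instructions the scheduler can treat as free: they emit no real work.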
bool IsNopInstruction(const HloInstruction& hlo) {
HloOpcode op = hlo.opcode();
return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||
op == HloOpcode::kConstant || op == HloOpcode::kParameter ||
op == HloOpcode::kTuple || op == HloOpcode::kPartitionId ||
op == HloOpcode::kReplicaId || hlo.IsEffectiveBitcast() ||
op == HloOpcode::kOptimizationBarrier;
}
bool IsAsyncComputeOp(const HloInstruction& hlo) {
return (hlo.opcode() == HloOpcode::kAsyncStart ||
hlo.opcode() == HloOpcode::kAsyncDone) &&
!hlo_query::IsCollectiveCommunicationOp(hlo.async_wrapped_opcode()) &&
hlo.async_execution_thread() != hlo.parent()->execution_thread();
}
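// Returns 1 when the Send/Recv carries the pipeline frontend attribute set to
// "1", otherwise 0; this selects between the two P2P stream pairs.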
int64_t GetPipelineStream(const HloInstruction& start) {
auto it = start.frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it != start.frontend_attributes().map().end() && it->second == "1") {
return 1;
}
return 0;
}
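// Maps a Send/Recv start or done op to one of the four P2P stream resources
// (send/recv crossed with pipeline stream 0/1). Starts release the resource,
// dones occupy it.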
std::pair<GpuResourceType, ResourceUsageType> GetP2PResourceAndUsage(
const HloInstruction& instr, const CanonicalAsyncOp& op) {
ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
int64_t pipeline = GetPipelineStream(instr);
HloOpcode opcode = op.inner;
GpuResourceType resource;
if (pipeline == 0) {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend0
: GpuResourceType::kGpuAsyncStreamRecv0;
} else {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend1
: GpuResourceType::kGpuAsyncStreamRecv1;
}
return {resource, usage};
}
bool IsGpuAsyncStart(const HloInstruction& hlo) {
return (hlo_query::IsAsyncCollectiveStartOp(&hlo,
                                              /*include_send_recv=*/true) &&
!IsSyncCollective(&hlo)) ||
IsAsyncComputeOp(hlo);
}
bool IsGpuAsyncDone(const HloInstruction& hlo) {
return (hlo_query::IsAsyncCollectiveDoneOp(&hlo,
                                             /*include_send_recv=*/true) &&
!IsSyncCollective(hlo.operand(0))) ||
IsAsyncComputeOp(hlo);
}
bool IsAsyncPair(const HloInstruction& from, const HloInstruction& target) {
return IsGpuAsyncStart(from) && IsGpuAsyncDone(target);
}
}
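// Size of a shape in bytes; dynamic shapes additionally carry an int32 bound
// per dimension as metadata.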
int64_t GetSizeOfShape(const Shape& shape, int pointer_size) {
int64_t size = ShapeUtil::ByteSizeOf(shape, pointer_size);
if (shape.IsTuple() || shape.is_static()) {
return size;
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return size + metadata_size;
}
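// Like DefaultGetCanonicalAsyncOp, but also treats Send/Recv and their Done
// ops as async start/done pairs, e.g. kSendDone -> {kAsyncDone, kSend}.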
CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo) {
switch (hlo.opcode()) {
case HloOpcode::kSend:
return {HloOpcode::kAsyncStart, HloOpcode::kSend};
case HloOpcode::kSendDone:
return {HloOpcode::kAsyncDone, HloOpcode::kSend};
case HloOpcode::kRecv:
return {HloOpcode::kAsyncStart, HloOpcode::kRecv};
case HloOpcode::kRecvDone:
return {HloOpcode::kAsyncDone, HloOpcode::kRecv};
default:
return DefaultGetCanonicalAsyncOp(hlo);
}
}
GpuAsyncTrackerBase::GpuAsyncTrackerBase(const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func)
: AsyncTracker(config, func) {}
bool GpuAsyncTrackerBase::IsSupportedAsyncDone(
const HloInstruction& hlo) const {
return IsGpuAsyncDone(hlo);
}
bool GpuAsyncTrackerBase::IsSupportedAsyncStart(
const HloInstruction& hlo) const {
return IsGpuAsyncStart(hlo);
}
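// Forces pipelined Recv ops to be scheduled as early as possible, and
// applies the backend config's force_earliest_schedule hint to graph nodes.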
void GpuAsyncTrackerBase::PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const {
for (auto inst : schedule_graph->GetOriginalInstrList()) {
if (inst->opcode() == HloOpcode::kRecv) {
if (inst->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {
HloGraphNode& node = schedule_graph->GetNode(inst);
node.SetForceEarly(true);
VLOG(5) << "Setting force early for instruction: " << inst->ToString();
}
}
if (inst->has_backend_config()) {
auto gpu_config = inst->backend_config<GpuBackendConfig>();
if (gpu_config.ok()) {
HloGraphNode& node = schedule_graph->GetNode(inst);
node.SetForceDelay(gpu_config->force_earliest_schedule());
VLOG(5) << "Setting force delay for instruction: " << inst->ToString();
}
}
}
}
GpuAsyncTracker::GpuAsyncTracker(const SchedulerConfig& config)
: GpuAsyncTrackerBase(config) {}
ResourcesVector GpuAsyncTracker::GetResourcesFromInstruction(
const HloInstruction& instr) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(instr);
if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {
ResourceUsageType usage;
GpuResourceType resource;
if (op.inner == HloOpcode::kSend || op.inner == HloOpcode::kRecv) {
std::tie(resource, usage) = GetP2PResourceAndUsage(instr, op);
} else {
usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
resource = hlo_query::IsCollectiveCommunicationOp(op.inner)
? GpuResourceType::kGpuAsyncStreamCollectives
: GpuResourceType::kGpuAsyncStreamComputes;
}
return {std::make_pair(
GetFirstTargetDefinedResource() + static_cast<int64_t>(resource),
usage)};
}
return GpuAsyncTrackerBase::GetResourcesFromInstruction(instr);
}
int64_t GpuAsyncTracker::GetNumTargetDefinedResources() const {
return static_cast<int64_t>(GpuResourceType::kNumTargetResources);
}
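// Every target-defined resource is backed by a single stream, except async
// computes, which may overlap on two streams.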
int64_t GpuAsyncTracker::GetNumAvailableResources(int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetNumAvailableResources(resource_type);
}
CHECK_LT(resource_type,
first_target_resource +
static_cast<int64_t>(GpuResourceType::kNumTargetResources));
if ((resource_type - first_target_resource) ==
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamComputes)) {
return 2;
}
return 1;
}
absl::string_view GpuAsyncTracker::GetResourceName(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceName(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (static_cast<GpuResourceType>(resource_type - first_target_resource)) {
case GpuResourceType::kGpuAsyncStreamSend0:
return "kGpuAsyncStreamSend0";
case GpuResourceType::kGpuAsyncStreamSend1:
return "kGpuAsyncStreamSend1";
case GpuResourceType::kGpuAsyncStreamRecv0:
return "kGpuAsyncStreamRecv0";
case GpuResourceType::kGpuAsyncStreamRecv1:
return "kGpuAsyncStreamRecv1";
case GpuResourceType::kGpuAsyncStreamCollectives:
return "kGpuAsyncStreamCollectives";
case GpuResourceType::kGpuAsyncStreamComputes:
return "kGpuAsyncStreamComputes";
default:
return "kUnsupportedResource";
}
}
ResourceHazardType GpuAsyncTracker::GetResourceHazardType(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceHazardType(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
return ResourceHazardType::kUnshareable;
}
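// If a while loop's result feeds a SendDone/RecvDone on a given pipeline
// (i.e. the P2P op is completed outside the loop), the loop itself is
// charged one fewer unit of that resource.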
int64_t GpuAsyncTracker::GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const {
int64_t num_resources =
GpuAsyncTrackerBase::GetNumResourcesPerInstruction(resource_type, instr);
if (num_resources <= 0 || instr.opcode() != HloOpcode::kWhile) {
return num_resources;
}
int64_t first_p2p_resource =
GetFirstTargetDefinedResource() +
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamSend0);
if (resource_type < first_p2p_resource ||
resource_type > first_p2p_resource + 4) {
return num_resources;
}
auto find_instruction_for_pipeline = [&](HloOpcode opcode, int64_t pipeline) {
for (auto user1 : instr.users()) {
if (user1->opcode() == HloOpcode::kGetTupleElement) {
for (auto user2 : user1->users()) {
if (user2->opcode() == opcode) {
if (GetPipelineStream(*user2) == pipeline) {
return true;
}
}
}
}
}
return false;
};
bool found;
if (resource_type == first_p2p_resource) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 0);
} else if (resource_type == first_p2p_resource + 1) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 1);
} else if (resource_type == first_p2p_resource + 2) {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 0);
} else {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 1);
}
return num_resources - (found ? 1 : 0);
}
GpuLatencyEstimator::GpuLatencyEstimator(int64_t pointer_size,
GetCanonicalAsyncOpFunc func)
: ApproximateLatencyEstimator(func), pointer_size_(pointer_size) {}
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (IsNopInstruction(*instr)) {
return 0.0;
}
if (instr->opcode() == HloOpcode::kCustomCall) {
if (IsCublasGemm(*instr) || IsCustomCallToDnnConvolution(*instr)) {
return ApproximateLatencyEstimator::kMediumCost;
}
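    // All other custom calls are also treated as medium cost for now; the
    // branch is kept explicit so they can be special-cased later.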
return ApproximateLatencyEstimator::kMediumCost;
}
return ApproximateLatencyEstimator::NodeCost(instr);
}
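// Latency between an async start and its done: low for Recv pairs, 10x high
// for Send pairs, scaled up for costly all-reduces when approximation is
// enabled, and plain high latency for all other async pairs.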
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& to) const {
if (IsAsyncPair(from, to)) {
if (from.GetInstr().opcode() == HloOpcode::kRecv) {
return ApproximateLatencyEstimator::kLowLatency;
} else if (from.GetInstr().opcode() == HloOpcode::kSend) {
return ApproximateLatencyEstimator::kHighLatency * 10;
}
bool enable_approx_collectives =
from.GetInstr()
.GetModule()
->config()
.debug_options()
.xla_gpu_enable_approx_costly_collectives();
bool is_all_reduce = from.GetInstr().opcode() == HloOpcode::kAllReduceStart;
bool collective_size_exceeds_threshold =
GetSizeOfShape(from.GetInstr().shape(), pointer_size_) >
kCostlyAllReduceThreshold;
if (enable_approx_collectives && is_all_reduce &&
collective_size_exceeds_threshold) {
return ApproximateLatencyEstimator::kHighLatency *
kCostlyAllReduceMultiplier;
}
return ApproximateLatencyEstimator::kHighLatency;
}
return ApproximateLatencyEstimator::kLowLatency;
}
void GPUProfileStatisticsAggregator::HandleMissingInstructionCost(
const HloInstruction& instruction) {
if (!IsNopInstruction(instruction) &&
instruction.opcode() != HloOpcode::kWhile) {
missing_instructions_.insert(&instruction);
}
}
void GPUProfileStatisticsAggregator::HandleFoundInstructionCost(
const HloInstruction& instruction) {
found_instructions_count_++;
}
void GPUProfileStatisticsAggregator::HandleMissingInstructionLatency(
const HloInstruction& from, const HloInstruction& to) {
if (IsAsyncPair(from, to)) {
missing_instructions_.insert(&from);
}
}
void GPUProfileStatisticsAggregator::HandleFoundInstructionLatency(
const HloInstruction& from, const HloInstruction& to) {
found_instructions_count_++;
}
}
} | #include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/profile_guided_latency_estimator.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::StatusIs;
class GpuLatencyHidingSchedulerBaseTest : public HloTestBase {
protected:
absl::StatusOr<HloModule*> ScheduleModule(HloModule* module) {
auto& test_backend = backend();
const auto& gpu_device_info =
test_backend.default_stream_executor()->GetDeviceDescription();
HloModuleConfig config(module->config());
DebugOptions dboptions(config.debug_options());
dboptions.set_xla_gpu_enable_pgle_accuracy_checker(true);
config.set_debug_options(dboptions);
module->set_config(config);
TF_RETURN_IF_ERROR(
ScheduleGpuModule(module, 8, gpu_device_info)
.status());
return module;
}
HloModuleConfig GetModuleConfig(absl::string_view fdo_profile) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);
debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(true);
config.set_debug_options(debug_options);
*config.mutable_fdo_profile() = fdo_profile;
return config;
}
};
TEST_F(GpuLatencyHidingSchedulerBaseTest,
GPUProfileStatisticsAggregatorDoesNotCountMissingNoops) {
GPUProfileStatisticsAggregator aggregator;
ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();
ASSERT_EQ(before_stats.missing_instructions.size(), 0);
ASSERT_EQ(before_stats.found_instructions_count, 0);
absl::string_view kFdoProfile = "";
absl::string_view kHloModule = R"(
HloModule m
ENTRY main {
parameter0 = f32[] parameter(0)
parameter1 = f32[32] parameter(1)
const0 = f32[] constant(42)
bitcast0 = f32[2,16] bitcast(parameter1)
partition-id0 = u32[] partition-id()
replica-id0 = u32[] replica-id()
tuple0 = (f32[], f32[2,16], u32[], u32[]) tuple(parameter0, bitcast0, partition-id0, replica-id0)
opt-barrier = (f32[], f32[2,16], u32[], u32[]) opt-barrier(tuple0)
ROOT _ = get-tuple-element(opt-barrier), index=0
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloModule, config));
for (const HloInstruction* instr :
module->entry_computation()->instructions()) {
aggregator.HandleMissingInstructionCost(*instr);
ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();
EXPECT_EQ(after_stats.missing_instructions.size(), 0);
EXPECT_EQ(after_stats.found_instructions_count, 0);
}
}
TEST_F(GpuLatencyHidingSchedulerBaseTest,
GPUProfileStatisticsAggregatorCountsMissingInstruction) {
GPUProfileStatisticsAggregator aggregator;
ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();
ASSERT_EQ(before_stats.missing_instructions.size(), 0);
ASSERT_EQ(before_stats.found_instructions_count, 0);
absl::string_view kFdoProfile = R"pb(
costs { name: "dot0" cost_us: 100.0 }
)pb";
absl::string_view kHloModule = R"(
HloModule m
ENTRY main {
parameter0 = f32[] parameter(0)
parameter1 = f32[32] parameter(1)
const0 = f32[] constant(42)
add0 = f32[] add(parameter0, const0)
bitcast0 = f32[2,16] bitcast(parameter1)
tuple0 = (f32[], f32[2,16]) tuple(add0, bitcast0)
ROOT _ = get-tuple-element(tuple0), index=0
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloModule, config));
for (const HloInstruction* instr :
module->entry_computation()->instructions()) {
aggregator.HandleMissingInstructionCost(*instr);
}
ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();
EXPECT_EQ(after_stats.missing_instructions.size(), 1);
EXPECT_EQ((*after_stats.missing_instructions.begin())->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(after_stats.found_instructions_count, 0);
}
TEST_F(GpuLatencyHidingSchedulerBaseTest,
GPUProfileStatisticsAggregatorCountsMissingAsyncPairs) {
GPUProfileStatisticsAggregator aggregator;
ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();
ASSERT_EQ(before_stats.missing_instructions.size(), 0);
ASSERT_EQ(before_stats.found_instructions_count, 0);
absl::string_view kFdoProfile = "";
absl::string_view kHloModule = R"(
HloModule m
reduce {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT _ = f32[] add(x, y)
}
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[2] parameter(1)
ar_0 = f32[] all-reduce-start(p0), to_apply=reduce
ar_1 = f32[] all-reduce-done(ar_0)
rs_0 = ((f32[2]), f32[1]) reduce-scatter-start(p1), to_apply=reduce, dimensions={0}
rs_1 = f32[1] reduce-scatter-done(rs_0)
ag_0 = (f32[2], f32[4]) all-gather-start(p1), replica_groups={{0,1}}, dimensions={0}
ag_1 = f32[4] all-gather-done(ag_0)
ROOT _ = (f32[], f32[1], f32[4]) tuple(ar_1, rs_1, ag_1)
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloModule, config));
for (const HloInstruction* instr :
module->entry_computation()->instructions()) {
for (const HloInstruction* user : instr->users()) {
aggregator.HandleMissingInstructionLatency(*instr, *user);
}
}
ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();
EXPECT_EQ(after_stats.found_instructions_count, 0);
EXPECT_EQ(after_stats.missing_instructions.size(), 3);
EXPECT_THAT(
after_stats.missing_instructions,
UnorderedElementsAre(
Property(&HloInstruction::opcode, HloOpcode::kAllReduceStart),
Property(&HloInstruction::opcode, HloOpcode::kAsyncStart),
Property(&HloInstruction::opcode, HloOpcode::kAllGatherStart)));
}
TEST_F(GpuLatencyHidingSchedulerBaseTest,
       ScheduleGpuModuleErrorsOutOnMissingInstructionsForAWhileLoopBody) {
absl::string_view kFdoProfile = R"pb(
costs { name: "dot0" cost_us: 100.0 }
)pb";
absl::string_view kHloModule = R"(
HloModule m
loop_body {
p = (u32[], f32[1]) parameter(0)
t0 = u32[] get-tuple-element(p), index=0
t1 = f32[1] get-tuple-element(p), index=1
add0 = f32[1] add(t1, t1)
ROOT _ = (u32[],f32[1]) tuple(t0,t1)
}
loop_cond {
p1 = (u32[], f32[1]) parameter(0)
count = u32[] get-tuple-element(p1), index=0
ub = u32[] constant(2)
ROOT _ = pred[] compare(count, ub), direction=LT
}
ENTRY main {
p2 = f32[1] parameter(0)
ind = u32[] constant(1)
t = (u32[],f32[1]) tuple(ind,p2)
w = (u32[],f32[1]) while(t), body=loop_body, condition=loop_cond
ROOT _ = f32[1] get-tuple-element(w), index=1
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule, config));
EXPECT_THAT(ScheduleModule(module.get()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(GpuLatencyHidingSchedulerBaseTest,
       ScheduleGpuModuleErrorsOutOnMissingInstructionsForAnEntryComputation) {
absl::string_view kFdoProfile = R"pb(
costs { name: "dot0" cost_us: 100.0 }
)pb";
absl::string_view kHloModule = R"(
HloModule m
ENTRY main {
p0 = f32[1] parameter(0)
ROOT add0 = f32[1] add(p0,p0)
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule, config));
EXPECT_THAT(ScheduleModule(module.get()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(GpuLatencyHidingSchedulerBaseTest,
ScheduleGpuModulePassesOnFullFDOProfile) {
absl::string_view kFdoProfile = R"pb(
costs { name: "add0" cost_us: 100.0 }
)pb";
absl::string_view kHloModule = R"(
HloModule m
ENTRY main {
p0 = f32[1] parameter(0)
ROOT add0 = f32[1] add(p0,p0)
}
)";
auto config = GetModuleConfig(kFdoProfile);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule, config));
TF_EXPECT_OK(ScheduleModule(module.get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae38afae-cea4-480b-bc81-c353972f7926 | cpp | tensorflow/tensorflow | reduction_utils | third_party/xla/xla/service/gpu/reduction_utils.cc | third_party/xla/xla/service/gpu/reduction_utils_test.cc | #include "xla/service/gpu/reduction_utils.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <ostream>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#ifdef GOOGLE_CUDA
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#endif
namespace xla {
namespace gpu {
namespace {
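// Splits the physical (minor-to-major) dimension order of `shape` into
// element counts {major, middle, minor}, where `dims_middle` (consecutive in
// the layout) forms the middle segment.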
Vector3 PartitionShapeByMiddleDimensions(
const Shape& shape, absl::Span<const int64_t> dims_middle) {
CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle));
Vector3 values = {1, 1, 1};
enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 };
Segment cur_segment = kMinor;
for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) {
if (cur_segment != kMajor) {
bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim);
if (cur_segment == kMinor) {
if (cur_dim_in_middle) {
cur_segment = kMiddle;
}
} else if (cur_segment == kMiddle) {
if (!cur_dim_in_middle) {
cur_segment = kMajor;
}
}
}
values[cur_segment] *= shape.dimensions(cur_dim);
}
return values;
}
}
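// Row reductions use 1024 threads along x unless ptxas is older than 12.2.0,
// in which case 512 is used; the probe result is computed once and cached in
// a process-wide atomic.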
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) {
#ifdef GOOGLE_CUDA
static absl::Mutex mutex(absl::kConstInit);
static std::atomic<bool*> use_reduced_thread_count_atomic = nullptr;
bool* use_reduced_thread_count =
use_reduced_thread_count_atomic.load(std::memory_order_acquire);
if (use_reduced_thread_count == nullptr) {
absl::MutexLock lock(&mutex);
use_reduced_thread_count =
use_reduced_thread_count_atomic.load(std::memory_order_relaxed);
if (use_reduced_thread_count == nullptr) {
auto ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
auto ptxas_version_tuple =
se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir);
use_reduced_thread_count = new bool(false);
if (!ptxas_version_tuple.ok() ||
ptxas_version_tuple.value() <
stream_executor::SemanticVersion{12, 2, 0}) {
*use_reduced_thread_count = true;
}
use_reduced_thread_count_atomic.store(use_reduced_thread_count,
std::memory_order_release);
}
}
if (*use_reduced_thread_count) {
return 512;
}
#endif
return 1024;
}
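// Tile sizes over the canonical {major, middle, minor} components: row
// reductions tile the batch dimension and take 16 elements of the reduced
// minor dimension per thread; column reductions tile 128 elements of the
// reduced dimension.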
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
int64_t tile_z = std::min(reduction_dimensions.dimensions[0],
BatchedReductionRaceFreeBound());
return {tile_z, 1, 16};
}
return {1, 128, 1};
}
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions);
if (reduction_dimensions.is_row_reduction) {
return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2];
}
return WarpSize() * reduction_tiling[1];
}
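// Heuristic for when the dedicated reduction emitter beats the elemental
// emitter, based on how the reduced/kept extents compare to the warp size.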
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return (reduction_dimensions.dimensions[2] >= WarpSize()) ||
((WarpSize() % reduction_dimensions.dimensions[2]) == 0);
}
int64_t major_size = reduction_dimensions.dimensions[1];
int64_t minor_size = reduction_dimensions.dimensions[2];
bool prefer_elemental_emitter =
(major_size < WarpSize()) ||
(major_size < 2 * WarpSize() && minor_size < WarpSize()) ||
(major_size < 4 * WarpSize() && minor_size < 8) ||
(major_size < 8 * WarpSize() && minor_size < 3);
return !prefer_elemental_emitter;
}
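// A reduce is "from or to contiguous dimensions" if either the kept or the
// reduced dimensions are physically consecutive in the operand layout, and
// the dedicated emitter is expected to win.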
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) {
if (reduce.opcode() != HloOpcode::kReduce) {
return false;
}
const Shape& operand_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_keep) ||
LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_reduce)) &&
IsUnnestedReductionFasterThanElemental(
GetReductionKindAndContiguousComponents(reduce));
}
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return reduction_dimensions.dimensions[2] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions) &&
reduction_dimensions.dimensions[0] <=
BatchedReductionRaceFreeBound();
}
return reduction_dimensions.dimensions[1] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions);
}
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions) {
bool is_row_reduction = reduction_dimensions.is_row_reduction;
os << (is_row_reduction ? "row " : "column ") << "reduction ["
<< absl::StrJoin(reduction_dimensions.dimensions, ",") << "] -> ["
<< reduction_dimensions.dimensions[0] << ", "
<< reduction_dimensions
.dimensions[is_row_reduction
? ReductionDimensions::kRowKeptDimension
: ReductionDimensions::kColMinorKeptDimension]
<< "]";
return os;
}
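// Canonicalizes a reduce into three contiguous components: row reductions
// yield {batch, kept, reduced-minor}; column reductions {kept-major,
// reduced, kept-minor}.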
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce) {
Shape input_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < input_shape.rank(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
if (dims_to_keep.empty()) {
return {true,
{1, 1, ShapeUtil::ElementsIn(input_shape)}};
}
if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(),
dims_to_keep)) {
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_keep);
if (shape_partition[1] == 1) {
return {true,
{1, 1, shape_partition[0] * shape_partition[2]}};
}
if (shape_partition[2] == 1) {
return {false,
{1, shape_partition[0], shape_partition[1]}};
}
return {true, shape_partition};
}
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce);
if (shape_partition[2] == 1) {
return {true,
{1, shape_partition[0], shape_partition[1]}};
}
return {false, shape_partition};
}
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero) {
if (!IsReductionFromOrToContiguousDimensions(hero)) {
return false;
}
return &root == &hero ||
ReductionIsRaceFree(hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(hero));
}
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce) {
return GetReductionKindAndContiguousComponents(*reduce_hero) ==
GetReductionKindAndContiguousComponents(*first_reduce);
}
}
} | #include "xla/service/gpu/reduction_utils.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ReductionUtilsTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
bitcast = f32[32,8,8]{2,1,0} bitcast(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,32]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,32]{1,0} parameter(0)
neg = f32[32,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,32]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST(ReductionDimensionsTest, GetOutputShape) {
ReductionDimensions row_reduction{true, {1, 2, 3}};
ReductionDimensions col_reduction{false, {1, 2, 3}};
EXPECT_THAT(row_reduction.GetOutputShape(), ElementsAre(2));
EXPECT_THAT(col_reduction.GetOutputShape(), ElementsAre(1, 3));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
757b4fb9-3ffb-4887-8a6f-740c46156148 | cpp | tensorflow/tensorflow | runtime_intrinsics | third_party/xla/xla/service/gpu/runtime_intrinsics.cc | third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc | #include "xla/service/gpu/runtime_intrinsics.h"
#include <cstdint>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_finder.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
std::string GetGpuPlatformName() {
return absl::AsciiStrToUpper(
PlatformUtil::CanonicalPlatformName("gpu").value());
}
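// Finds the stream behind `stream_handle`, copies a single PRED byte from
// `buffer` on the device, and returns an internal error carrying `error_msg`
// if the predicate is false.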
absl::Status AssertOnGpu(void* stream_handle, void* buffer,
absl::string_view error_msg) {
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(GetGpuPlatformName()));
TF_ASSIGN_OR_RETURN(se::Stream * stream,
stream_executor::FindStream(platform, stream_handle));
if (!stream) {
return Internal("Stream not found for: %p", stream_handle);
}
int8_t expected = false;
int64_t byte_size = sizeof(int8_t);
CHECK_EQ(byte_size, ShapeUtil::ByteSizeOfPrimitiveType(PrimitiveType::PRED));
TF_RETURN_IF_ERROR(stream->Memcpy(
&expected, se::DeviceMemoryBase{buffer, static_cast<uint64_t>(byte_size)},
byte_size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (!static_cast<bool>(expected)) {
return Internal("%s", error_msg);
}
return absl::OkStatus();
}
void AssertionCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
absl::Status s =
AssertOnGpu(stream_handle, buffers[0],
absl::string_view{opaque, static_cast<uint64_t>(opaque_len)});
if (!s.ok()) {
auto msg = s.message();
XlaCustomCallStatusSetFailure(status, msg.data(), msg.size());
}
}
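// Intentionally does nothing: the custom call exists only so that a token
// can be produced for sequencing.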
void NopReturnTokenCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
VLOG(1) << "NopReturnTokenCustomCall called.";
}
}
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kXlaGpuAssertCustomCallTag), AssertionCustomCall,
GetGpuPlatformName());
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kNopReturnTokenCustomCallTarget), NopReturnTokenCustomCall,
GetGpuPlatformName());
} | #include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using RuntimeIntrinsicsTest = HloTestBase;
TEST_F(RuntimeIntrinsicsTest, NopReturnTokenWorks) {
constexpr absl::string_view kHloText = R"(
HloModule m
ENTRY e {
constant = u32[2]{0} constant({0, 1})
ROOT nop_return_token = token[] custom-call(constant), custom_call_target="NopReturnToken", custom_call_has_side_effect=true
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(kHloText));
EXPECT_EQ(module->entry_computation()->instruction_count(), 2);
EXPECT_TRUE(Run(std::move(module), false));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7606cb75-ccae-4f79-b2f0-741f5d1073cb | cpp | tensorflow/tensorflow | ir_emitter | third_party/xla/xla/service/cpu/ir_emitter.cc | third_party/xla/xla/service/cpu/ir_emitter_test.cc | #include "xla/service/cpu/ir_emitter.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/cpu_options.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/elemental_math_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/ir_function.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/parallel_loop_emitter.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_loop.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/service/llvm_ir/tuple_ops.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/math/math_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_memory_util.h"
#endif
namespace xla {
namespace {
using llvm_ir::IrName;
using llvm_ir::SetToFirstInsertPoint;
}
namespace cpu {
class IrEmitter::CpuElementalIrEmitter : public ElementalIrEmitter {
public:
CpuElementalIrEmitter(const HloModuleConfig& module_config,
IrEmitter* ir_emitter, llvm::Module* module)
: ElementalIrEmitter(
module, ir_emitter->b(),
Options{true}),
hlo_module_config_(module_config),
ir_emitter_(ir_emitter) {}
protected:
absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
llvm::Value* lhs, llvm::Value* rhs,
absl::string_view) override {
return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);
}
absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitTanh(module(), *b(), prim_type, value);
}
absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitErf(module(), *b(), prim_type, value);
}
absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer) override {
return ir_emitter_->EmitThreadLocalCall(callee, parameters, name,
is_reducer);
}
bool fast_min_max() override {
return hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max();
}
const HloModuleConfig& hlo_module_config_;
IrEmitter* ir_emitter_;
};
IrEmitter::IrEmitter(mlir::MLIRContext* mlir_context,
const HloModule& hlo_module,
const BufferAssignment& assignment,
llvm::Module* llvm_module,
absl::flat_hash_map<const HloInstruction*, int64_t>
instruction_to_profile_idx,
absl::flat_hash_map<const HloComputation*, int64_t>
computation_to_profile_idx,
absl::flat_hash_map<const HloComputation*, bool>
computation_transitively_contains_custom_call,
const TargetMachineFeatures* target_machine_features,
bool emit_code_for_msan)
: assignment_(assignment),
module_(llvm_module),
arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()),
main_builder_(llvm_module->getContext()),
current_builder_(&main_builder_),
mlir_context_(mlir_context),
instruction_to_profile_idx_(std::move(instruction_to_profile_idx)),
computation_to_profile_idx_(std::move(computation_to_profile_idx)),
computation_transitively_contains_custom_call_(
std::move(computation_transitively_contains_custom_call)),
alias_analysis_(hlo_module, assignment, &llvm_module->getContext()),
hlo_module_config_(hlo_module.config()),
is_top_level_computation_(false),
target_machine_features_(*target_machine_features),
emit_code_for_msan_(emit_code_for_msan) {
b()->setFastMathFlags(llvm_ir::GetCpuFastMathFlags(hlo_module_config_));
absl::Status s = GatherComputationsByAllocationType(
&hlo_module, &thread_local_computations_, &global_computations_);
absl::c_sort(thread_local_computations_);
absl::c_sort(global_computations_);
TF_CHECK_OK(s) << "Should have failed buffer assignment.";
}
IrEmitter::~IrEmitter() {
if (!compute_function_.empty()) {
LOG(WARNING) << "Compute function stack is not empty: "
<< compute_function_.size();
}
}
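// Copies a thread-local computation's root into the caller-provided result
// argument: scalars via a direct load/store, tuples element by element.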
void IrEmitter::EmitThreadLocalFunctionEpilogue(HloComputation* computation) {
llvm::Argument* out_parameter = compute_function()->result_arg();
llvm_ir::IrArray root_value = GetIrArrayFor(computation->root_instruction());
const Shape& return_shape = computation->root_instruction()->shape();
if (ShapeUtil::IsScalar(return_shape)) {
llvm::Value* ret_value =
Load(root_value.GetBasePointeeType(), root_value.GetBasePointer(),
"load_ret_value");
Store(ret_value, out_parameter);
} else {
CHECK(return_shape.IsTuple());
llvm::Type* tuple_type = llvm_ir::ShapeToIrType(return_shape, module_);
for (int i = 0; i < return_shape.tuple_shapes_size(); i++) {
const Shape& element_shape = return_shape.tuple_shapes(i);
llvm::Value* destination = llvm_ir::EmitGetTupleElement(
element_shape,
i,
MinimumAlignmentForShape(element_shape), out_parameter,
tuple_type, b());
llvm::Value* source = llvm_ir::EmitGetTupleElement(
element_shape,
i,
MinimumAlignmentForShape(element_shape),
root_value.GetBasePointer(), root_value.GetBasePointeeType(), b());
Store(Load(IrShapeType(element_shape), source), destination);
}
}
}
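// Emits an LLVM function for `computation`, visiting instructions in
// `instruction_order`. Parallelized computations get dynamic loop bounds
// derived from the root's outer dimension partitions.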
absl::StatusOr<llvm::Function*> IrEmitter::EmitComputation(
HloComputation* computation, absl::string_view function_name_prefix,
bool is_top_level_computation,
absl::Span<HloInstruction* const> instruction_order,
bool allow_reassociation,
absl::Span<const llvm::Attribute::AttrKind> function_attributes) {
std::string function_name = name_uniquer_.GetUniqueName(function_name_prefix);
VLOG(2) << "Emitting IR for CPU function [" << function_name_prefix << "]";
is_top_level_computation_ = is_top_level_computation;
allow_reassociation_ = allow_reassociation;
num_dynamic_loop_bounds_ = 0;
auto backend_config_or =
computation->root_instruction()->backend_config<BackendConfig>();
if (backend_config_or.ok() &&
!backend_config_or->outer_dimension_partitions().empty()) {
num_dynamic_loop_bounds_ =
backend_config_or->outer_dimension_partitions().size();
}
if (computation->root_instruction()->opcode() != HloOpcode::kOutfeed) {
TF_ASSIGN_OR_RETURN(
computation_root_allocation_,
assignment_.GetUniqueTopLevelSlice(computation->root_instruction()));
}
bool has_thread_local_param = false;
for (const HloInstruction* param : computation->parameter_instructions()) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice param_slice,
assignment_.GetUniqueTopLevelSlice(param));
has_thread_local_param |= param_slice.allocation()->is_thread_local();
computation_parameter_allocations_[param_slice.allocation()->index()] =
param->parameter_number();
}
InitializeIrFunction(function_name);
bool use_rdtscp = arch_type_ == llvm::Triple::ArchType::x86 ||
arch_type_ == llvm::Triple::ArchType::x86_64;
profiling_state_ = ProfilingState(use_rdtscp);
tracing_state_.set_enabled(
computation->parent()->config().cpu_traceme_enabled());
llvm::IRBuilderBase::FastMathFlagGuard guard(*b());
llvm::FastMathFlags flags = b()->getFastMathFlags();
flags.setAllowReassoc(flags.allowReassoc() || allow_reassociation);
b()->setFastMathFlags(flags);
TF_RETURN_IF_ERROR(computation->AcceptOrdered(this, instruction_order));
llvm::Function* ir_function = compute_function()->function();
for (llvm::Attribute::AttrKind attr : function_attributes) {
ir_function->addFnAttr(attr);
}
InsertOrDie(&emitted_functions_,
ComputationToEmit{computation, allow_reassociation}, ir_function);
const BufferAllocation* root_allocation =
computation_root_allocation_.allocation();
if (root_allocation &&
(root_allocation->is_thread_local() ||
(root_allocation->is_constant() && has_thread_local_param))) {
EmitThreadLocalFunctionEpilogue(computation);
}
PopComputeFunction();
computation_root_allocation_ = BufferAllocation::Slice();
computation_parameter_allocations_.clear();
return ir_function;
}
void IrEmitter::InitializeIrFunction(const std::string& function_name) {
llvm::Function::LinkageTypes linkage =
is_top_level_computation_ ? llvm::GlobalValue::ExternalLinkage
: llvm::GlobalValue::InternalLinkage;
compute_function_.emplace(function_name, linkage, hlo_module_config_, module_,
b(), num_dynamic_loop_bounds_);
}
absl::Status IrEmitter::HandleBitcast(HloInstruction* bitcast) {
VLOG(2) << "HandleBitcast: " << bitcast->ToString();
emitted_value_[bitcast] = GetEmittedValueFor(bitcast->operand(0));
return absl::OkStatus();
}
llvm::Constant* IrEmitter::EmitGlobalForLiteral(const Literal& literal) {
llvm::Constant* initializer =
llvm_ir::ConvertLiteralToIrConstant(literal, module_);
llvm::GlobalVariable* result_global = new llvm::GlobalVariable(
*module_,
initializer->getType(),
true,
llvm::GlobalValue::PrivateLinkage,
initializer,
"");
result_global->setAlignment(
llvm::Align(MinimumAlignmentForShape(literal.shape())));
result_global->setUnnamedAddr(llvm::GlobalVariable::UnnamedAddr::Global);
return result_global;
}
absl::Status IrEmitter::EmitConstantGlobals() {
for (const BufferAllocation& allocation : assignment_.Allocations()) {
if (!allocation.is_constant()) {
continue;
}
const Literal& literal = llvm_ir::LiteralForConstantAllocation(allocation);
llvm::Constant* global_for_const;
auto it = emitted_literals_.find(LayoutSensitiveLiteralWrapper{literal});
if (it != emitted_literals_.end()) {
global_for_const = it->second;
} else {
global_for_const = EmitGlobalForLiteral(literal);
InsertOrDie(&emitted_literals_, LayoutSensitiveLiteralWrapper{literal},
global_for_const);
}
InsertOrDie(&constant_buffer_to_global_, allocation.index(),
global_for_const);
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleConstant(HloInstruction* constant) {
VLOG(2) << "HandleConstant: " << constant->ToString();
return EmitTargetAddressForOp(constant);
}
absl::Status IrEmitter::HandleCopy(HloInstruction* copy) {
if (copy->shape().IsTuple() ||
(copy->shape().IsArray() &&
LayoutUtil::Equal(copy->operand(0)->shape().layout(),
copy->shape().layout()))) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(copy));
return EmitMemcpy(*(copy->operand(0)), *copy);
} else if (copy->shape().IsArray()) {
return DefaultAction(copy);
}
return Unimplemented("unsupported operand type %s for copy instruction",
PrimitiveType_Name(copy->shape().element_type()));
}
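// Minimum alignment for a buffer of the given primitive type: its byte size,
// capped at 8.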
int MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {
int64_t byte_size = ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
DCHECK_GE(byte_size, 0);
DCHECK_LE(byte_size, 16);
return std::min(int64_t{8}, byte_size);
}
int IrEmitter::MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {
return ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type);
}
int64_t IrEmitter::ByteSizeOf(const Shape& shape) const {
return llvm_ir::ByteSizeOf(shape, module_->getDataLayout());
}
int IrEmitter::MinimumAlignmentForShape(const Shape& shape) {
if (ShapeUtil::IsScalar(shape)) {
return MinimumAlignmentForPrimitiveType(shape.element_type());
}
int64_t buffer_size = ByteSizeOf(shape);
DCHECK_GE(buffer_size, 0);
DCHECK_LE(buffer_size, SIZE_MAX);
return target_machine_features_.minimum_alignment_for_allocation(buffer_size);
}
void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,
const Shape& shape) {
int alignment = MinimumAlignmentForShape(shape);
if (alignment > 1) {
llvm_ir::SetAlignmentMetadataForLoad(load, alignment);
}
}
void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,
int64_t buffer_size) {
int alignment =
target_machine_features_.minimum_alignment_for_allocation(buffer_size);
if (alignment > 1) {
llvm_ir::SetAlignmentMetadataForLoad(load, alignment);
}
}
void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,
const Shape& shape) {
AttachDereferenceableMetadataForLoad(load, ByteSizeOf(shape));
}
void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,
int64_t buffer_size) {
if (buffer_size > 0) {
llvm_ir::SetDereferenceableMetadataForLoad(load, buffer_size);
}
}
void IrEmitter::AttachInvariantLoadMetadataForLoad(llvm::LoadInst* load) const {
AttachInvariantLoadMetadataForLoad(load, hlo_module_config_);
}
void IrEmitter::AttachInvariantLoadMetadataForLoad(
llvm::LoadInst* load, const HloModuleConfig& config) {
if (config.debug_options().xla_llvm_enable_invariant_load_metadata()) {
load->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(load->getContext(), {}));
}
}
absl::Status IrEmitter::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
const HloInstruction* operand = get_tuple_element->operand(0);
const Shape& shape = get_tuple_element->shape();
emitted_value_[get_tuple_element] = llvm_ir::EmitGetTupleElement(
shape, get_tuple_element->tuple_index(), MinimumAlignmentForShape(shape),
GetEmittedValueFor(operand), IrShapeType(operand->shape()), b());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleSelect(HloInstruction* select) {
auto pred = select->operand(0);
TF_RET_CHECK(pred->shape().element_type() == PRED);
return DefaultAction(select);
}
absl::Status IrEmitter::HandleInfeed(HloInstruction* instruction) {
HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
VLOG(2) << "HandleInfeed: " << infeed->ToString();
const Shape& data_shape = infeed->infeed_shape();
DCHECK(ShapeUtil::Equal(data_shape,
ShapeUtil::GetTupleElementShape(infeed->shape(), 0)));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(infeed));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
assignment_.GetUniqueSlice(infeed, {0}));
llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);
llvm::Type* data_type = IrShapeType(data_shape);
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice token_slice,
assignment_.GetUniqueSlice(infeed, {1}));
llvm::Value* token_address = EmitBufferPointer(
token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1));
llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, b());
if (data_shape.IsTuple()) {
TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape));
std::vector<llvm::Value*> tuple_element_addresses;
for (int i = 0; i < data_shape.tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,
assignment_.GetUniqueSlice(infeed, {0, i}));
const Shape& tuple_element_shape =
ShapeUtil::GetTupleElementShape(data_shape, i);
llvm::Value* tuple_element_address =
EmitBufferPointer(buffer, tuple_element_shape);
TF_RETURN_IF_ERROR(EmitXfeedTransfer(
XfeedKind::kInfeed, tuple_element_shape, tuple_element_address));
tuple_element_addresses.push_back(tuple_element_address);
}
llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_type, data_shape),
tuple_element_addresses, b());
} else {
TF_RETURN_IF_ERROR(
EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address));
}
return absl::OkStatus();
}
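// Moves one buffer through the xfeed runtime: acquires the staging buffer,
// memcpys in the direction implied by `kind` (unpoisoning for msan on
// outfeed), then releases the staging buffer.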
absl::Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
llvm::Value* program_buffer_address) {
int64_t length = ByteSizeOf(shape);
if (length < 0 || length > std::numeric_limits<int32_t>::max()) {
return InvalidArgument(
"xfeed (infeed or outfeed) buffer length %d is outside the valid "
"size range",
length);
}
int32_t length_32 = static_cast<int32_t>(length);
int32_t shape_length;
TF_ASSIGN_OR_RETURN(
llvm::Value * shape_ptr,
llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, b()));
const char* acquire_func_name =
kind == XfeedKind::kInfeed
? runtime::kAcquireInfeedBufferForDequeueSymbolName
: runtime::kAcquireOutfeedBufferForPopulationSymbolName;
llvm::Value* acquired_pointer = EmitCallToFunc(
acquire_func_name,
{GetExecutableRunOptionsArgument(), b()->getInt32(length_32), shape_ptr,
b()->getInt32(shape_length)},
b()->getPtrTy());
if (kind == XfeedKind::kInfeed) {
MemCpy(program_buffer_address, llvm::Align(1),
acquired_pointer,
llvm::Align(1), length_32);
} else {
MemCpy(acquired_pointer, llvm::Align(1),
program_buffer_address,
llvm::Align(1), length_32);
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc(
"__msan_unpoison",
{acquired_pointer, llvm::ConstantInt::get(intptr_type, length)},
b()->getVoidTy());
}
}
const char* release_func_name =
kind == XfeedKind::kInfeed
? runtime::kReleaseInfeedBufferAfterDequeueSymbolName
: runtime::kReleaseOutfeedBufferAfterPopulationSymbolName;
EmitCallToFunc(release_func_name,
{GetExecutableRunOptionsArgument(), b()->getInt32(length_32),
acquired_pointer, shape_ptr, b()->getInt32(shape_length)},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOutfeed(HloInstruction* outfeed) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(outfeed));
HloInstruction* operand = outfeed->operands()[0];
const Shape& operand_shape = operand->shape();
llvm::Value* value = GetEmittedValueFor(operand);
if (!operand_shape.IsTuple()) {
return EmitXfeedTransfer(XfeedKind::kOutfeed, operand_shape, value);
}
TF_RET_CHECK(!ShapeUtil::IsNestedTuple(operand_shape));
for (int i = 0; i < operand_shape.tuple_shapes_size(); ++i) {
const Shape& tuple_element_shape =
ShapeUtil::GetTupleElementShape(operand_shape, i);
llvm::Value* tuple_element = llvm_ir::EmitGetTupleElement(
tuple_element_shape, i, MinimumAlignmentForShape(tuple_element_shape),
value, IrShapeType(operand_shape), b());
TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kOutfeed,
tuple_element_shape, tuple_element));
}
return absl::OkStatus();
}
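// Lowers sort to a call to the KeyValueSort runtime function: operands are
// copied to their destination buffers (unless aliased) and sorted in place
// along the physical sort dimension using the emitted comparator.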
absl::Status IrEmitter::HandleSort(HloInstruction* hlo) {
const HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(sort));
Shape keys_shape = sort->keys()->shape();
PrimitiveType keys_type = keys_shape.element_type();
if (!primitive_util::IsArrayType(keys_type)) {
return Unimplemented("Element type %s not supported in the Sort op on CPU.",
PrimitiveType_Name(keys_type));
}
std::vector<llvm::Value*> destination_addresses(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
ShapeIndex shape_index =
sort->values_count() > 0 ? ShapeIndex({i}) : ShapeIndex({});
const HloInstruction* operand = sort->operand(i);
TF_RET_CHECK(
LayoutUtil::LayoutsInShapesEqual(keys_shape, operand->shape()));
TF_RET_CHECK(LayoutUtil::LayoutsInShapesEqual(
keys_shape, ShapeUtil::GetSubshape(sort->shape(), shape_index)));
auto destination_buffer = GetAllocationSlice(*sort, shape_index);
destination_addresses[i] =
EmitBufferPointer(destination_buffer, operand->shape());
auto source_address = GetAllocationSlice(*operand);
if (destination_buffer != source_address) {
int64_t primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(operand->shape().element_type());
auto source_buffer = GetEmittedValueFor(operand);
int64_t size = ByteSizeOf(operand->shape());
MemCpy(destination_addresses[i],
llvm::Align(primitive_type_size), source_buffer,
llvm::Align(primitive_type_size), size);
}
}
Shape normalized_keys_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(keys_shape);
auto logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(keys_shape.layout());
TF_RET_CHECK(sort->sort_dimension() < logical_to_physical.size());
int64_t physical_dimension_to_sort =
logical_to_physical[sort->sort_dimension()];
int64_t sort_dimension_elements =
normalized_keys_shape.dimensions(physical_dimension_to_sort);
int64_t higher_dimensions = 1;
for (int64_t i = 0; i < physical_dimension_to_sort; ++i) {
higher_dimensions *= normalized_keys_shape.dimensions(i);
}
int64_t lower_dimensions = 1;
for (int64_t i = normalized_keys_shape.rank() - 1;
i > physical_dimension_to_sort; --i) {
lower_dimensions *= normalized_keys_shape.dimensions(i);
}
CHECK(absl::c_binary_search(thread_local_computations_, sort->to_apply()));
llvm::Value* values = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(sort->operand_count()), "cc_values_alloca",
b());
llvm::Value* sizes = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getInt32Ty(), b()->getInt32(sort->operand_count()),
"cc_sizes_alloca", b());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
llvm::Value* slot_in_values_alloca =
ConstInBoundsGEP1_32(b()->getPtrTy(), values, i);
Store(destination_addresses[i], slot_in_values_alloca);
llvm::Value* slot_in_sizes_alloca =
ConstInBoundsGEP1_32(b()->getInt32Ty(), sizes, i);
llvm::Value* size = b()->getInt32(ShapeUtil::ByteSizeOfPrimitiveType(
sort->operand(i)->shape().element_type()));
Store(size, slot_in_sizes_alloca);
}
auto less_than_function =
FindOrDie(emitted_functions_,
ComputationToEmit{sort->to_apply(), allow_reassociation_});
EmitCallToFunc(
runtime::kKeyValueSortSymbolName,
{b()->getInt64(higher_dimensions), b()->getInt64(sort_dimension_elements),
b()->getInt64(lower_dimensions), values,
b()->getInt32(sort->operand_count()), sizes,
b()->getInt1(sort->is_stable()), GetExecutableRunOptionsArgument(),
GetProfileCountersArgument(), less_than_function},
b()->getVoidTy());
if (sort->values_count() > 0) {
llvm_ir::EmitTuple(GetIrArrayFor(sort), destination_addresses, b());
}
return absl::OkStatus();
}
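// A tuple is emitted as an array of pointers to its operands' buffers.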
absl::Status IrEmitter::HandleTuple(HloInstruction* tuple) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple));
llvm::SmallVector<llvm::Value*> base_ptrs;
for (auto operand : tuple->operands()) {
base_ptrs.push_back(GetEmittedValueFor(operand));
}
llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, b());
return absl::OkStatus();
}
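// ReduceWindow lowers through DefaultAction, with floating-point
// reassociation temporarily allowed for the embedded reduction computation.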
absl::Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
bool saved_allow_reassociation = allow_reassociation_;
allow_reassociation_ = true;
absl::Status status = DefaultAction(reduce_window);
allow_reassociation_ = saved_allow_reassociation;
return status;
}
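// Thin overload that resolves the operand, source, and output IrArrays and
// forwards to the main SelectAndScatter emitter below.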
absl::Status IrEmitter::HandleSelectAndScatter(
HloInstruction* select_and_scatter) {
CHECK_EQ(select_and_scatter->operand_count(), 3);
const auto operand = select_and_scatter->operand(0);
const auto source = select_and_scatter->operand(1);
return HandleSelectAndScatter(select_and_scatter, GetIrArrayFor(operand),
GetIrArrayFor(source),
GetIrArrayFor(select_and_scatter));
}
absl::Status IrEmitter::HandleSelectAndScatter(
HloInstruction* select_and_scatter, const llvm_ir::IrArray& operand_array,
const llvm_ir::IrArray& source_array,
const llvm_ir::IrArray& output_array) {
CHECK_EQ(select_and_scatter->operand_count(), 3);
const auto operand = select_and_scatter->operand(0);
const auto source = select_and_scatter->operand(1);
const auto init_value = select_and_scatter->operand(2);
const Window& window = select_and_scatter->window();
PrimitiveType operand_element_type = operand->shape().element_type();
const int64_t rank = operand->shape().rank();
CHECK_EQ(rank, source->shape().rank());
CHECK_EQ(rank, window.dimensions_size());
if (window_util::HasDilation(window)) {
return Unimplemented(
"Dilation for SelectAndScatter is not implemented on CPU. ");
}
TF_RETURN_IF_ERROR(EmitTargetElementLoop(
select_and_scatter, IrName(select_and_scatter, "init"),
[this, init_value](const llvm_ir::IrArray::Index& target_index) {
llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
return Load(IrShapeType(init_value->shape()), init_value_addr);
},
std::optional<llvm_ir::IrArray>(output_array)));
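  // Iterate over every element of `source`; for each one, scan its window in
  // `operand` to select a position, then scatter the source element there.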
llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), b());
const llvm_ir::IrArray::Index source_index =
source_loops.AddLoopsForShape(source->shape(), "source");
SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(), b());
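  // Stack slots track the currently selected value and its operand index,
  // plus a flag recording whether any in-bounds element has been seen yet.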
llvm::AllocaInst* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
"selected_value_address", b(),
MinimumAlignmentForPrimitiveType(operand_element_type));
llvm::AllocaInst* selected_index_address =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getInt64Ty(), b()->getInt32(rank), "selected_index_address",
b());
llvm::AllocaInst* initialized_flag_address =
llvm_ir::EmitAllocaAtFunctionEntry(b()->getInt1Ty(),
"initialized_flag_address", b());
Store(b()->getInt1(false), initialized_flag_address);
llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "window"), b());
llvm::SmallVector<int64_t> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
}
const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(), b());
llvm::SmallVector<llvm::Value*> operand_multi_index(source_index.size());
llvm::Value* in_bounds_condition = b()->getTrue();
for (int64_t i = 0; i < rank; ++i) {
llvm::Value* strided_index =
NSWMul(source_index[i], b()->getInt64(window.dimensions(i).stride()));
operand_multi_index[i] =
NSWSub(NSWAdd(strided_index, window_index[i]),
b()->getInt64(window.dimensions(i).padding_low()));
llvm::Value* index_condition =
ICmpULT(operand_multi_index[i],
b()->getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
in_bounds_condition = And(in_bounds_condition, index_condition);
}
CHECK(in_bounds_condition != nullptr);
llvm_ir::LlvmIfData if_in_bounds =
llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", b());
SetToFirstInsertPoint(if_in_bounds.true_block, b());
llvm_ir::LlvmIfData if_initialized =
llvm_ir::EmitIfThenElse(Load(initialized_flag_address->getAllocatedType(),
initialized_flag_address),
"initialized", b());
SetToFirstInsertPoint(if_initialized.false_block, b());
const auto save_operand_index =
[&](const llvm_ir::IrArray::Index& operand_index) {
for (int64_t i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot =
InBoundsGEP(selected_index_address->getAllocatedType(),
selected_index_address, {b()->getInt32(i)});
Store(operand_index[i], selected_index_address_slot);
}
};
llvm_ir::IrArray::Index operand_index(
operand_multi_index, operand_array.GetShape(), b()->getInt64Ty());
llvm::Value* operand_data =
operand_array.EmitReadArrayElement(operand_index, b());
Store(operand_data, selected_value_address);
save_operand_index(operand_index);
Store(b()->getInt1(true), initialized_flag_address);
SetToFirstInsertPoint(if_initialized.true_block, b());
llvm::Value* operand_address =
operand_array.EmitArrayElementAddress(operand_index, b());
llvm::Value* operand_element =
Load(operand_array.GetElementLlvmType(), operand_address);
llvm::Value* result = EmitScalarReturningThreadLocalCall(
*select_and_scatter->select(),
{Load(selected_value_address->getAllocatedType(), selected_value_address),
operand_element},
"select_function");
llvm::Value* cond = ICmpNE(
result,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_select_lhs =
llvm_ir::EmitIfThenElse(cond, "if-select-lhs", b());
SetToFirstInsertPoint(if_select_lhs.false_block, b());
Store(Load(operand_array.GetElementLlvmType(), operand_address),
selected_value_address);
save_operand_index(operand_index);
SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(), b());
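  // After the window scan, reload the selected index and combine the source
  // element with the current output value via the scatter computation.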
llvm::SmallVector<llvm::Value*> selected_multi_index;
for (int64_t i = 0; i < rank; ++i) {
const std::vector<llvm::Value*> gep_index = {b()->getInt32(i)};
llvm::Value* selected_index_address_slot =
InBoundsGEP(selected_index_address->getAllocatedType(),
selected_index_address, gep_index);
llvm::Type* type = llvm::GetElementPtrInst::getIndexedType(
selected_index_address->getAllocatedType(), gep_index);
selected_multi_index.push_back(Load(type, selected_index_address_slot));
}
llvm::Value* source_value =
source_array.EmitReadArrayElement(source_index, b());
llvm_ir::IrArray::Index selected_index(
selected_multi_index, output_array.GetShape(), source_index.GetType());
llvm::Value* output_value =
output_array.EmitReadArrayElement(selected_index, b());
llvm::Value* scatter_value = EmitScalarReturningThreadLocalCall(
*select_and_scatter->scatter(), {output_value, source_value},
"scatter_function");
output_array.EmitWriteArrayElement(selected_index, scatter_value, b());
SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(), b());
return absl::OkStatus();
}
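// Lowers dot to EmitDotOperation; no addend array is fused at this call site.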
absl::Status IrEmitter::HandleDot(HloInstruction* dot) {
auto lhs = dot->operand(0);
auto rhs = dot->operand(1);
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
*dot, {lhs, rhs},
{PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
if (dnums.lhs_contracting_dimensions_size() != 1) {
return Unimplemented(
"Dot with multiple contracting dimensions not implemented.");
}
llvm_ir::IrArray lhs_array(GetIrArrayFor(lhs));
llvm_ir::IrArray rhs_array(GetIrArrayFor(rhs));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dot));
llvm_ir::IrArray target_array = GetIrArrayFor(dot);
VLOG(2) << "HandleDot: ";
VLOG(2) << " lhs operand: "
<< llvm_ir::DumpToString(lhs_array.GetBasePointer());
VLOG(2) << " rhs operand: "
<< llvm_ir::DumpToString(rhs_array.GetBasePointer());
VLOG(2) << " target: "
<< llvm_ir::DumpToString(target_array.GetBasePointer());
  return EmitDotOperation(*dot, target_array, lhs_array, rhs_array,
                          /*addend_array=*/nullptr,
                          GetExecutableRunOptionsArgument(), b(),
                          hlo_module_config_, target_machine_features_);
}
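// Convolutions with Eigen-compatible row-major layouts are lowered to a call
// into the Eigen (or MKL-DNN/ACL) convolution runtime; anything else falls
// back to the elemental DefaultAction emitter.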
absl::Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
*convolution, {lhs, rhs},
{PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
if (PotentiallyImplementedAsEigenConvolution(*convolution,
target_machine_features_)) {
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& convolution_shape = convolution->shape();
if (LayoutUtil::IsMonotonicWithDim0Major(lhs_shape.layout()) &&
LayoutUtil::IsMonotonicWithDim0Major(rhs_shape.layout()) &&
LayoutUtil::IsMonotonicWithDim0Major(convolution_shape.layout())) {
bool one_dim_convolution = lhs_shape.dimensions_size() == 3;
llvm::Value* lhs_address = GetEmittedValueFor(lhs);
llvm::Value* rhs_address = GetEmittedValueFor(rhs);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(convolution));
const ConvolutionDimensionNumbers& dnums =
convolution->convolution_dimension_numbers();
absl::InlinedVector<int64_t, 2> input_dims;
absl::InlinedVector<int64_t, 2> kernel_dims;
absl::InlinedVector<int64_t, 2> output_dims;
if (one_dim_convolution) {
input_dims.push_back(1);
kernel_dims.push_back(1);
output_dims.push_back(1);
}
const Shape& input_shape = convolution->operand(0)->shape();
int64_t input_batch =
input_shape.dimensions(dnums.input_batch_dimension());
for (int d : dnums.input_spatial_dimensions()) {
input_dims.push_back(input_shape.dimensions(d));
}
int64_t input_channels =
input_shape.dimensions(dnums.input_feature_dimension());
const Shape& kernel_shape = convolution->operand(1)->shape();
for (int d : dnums.kernel_spatial_dimensions()) {
kernel_dims.push_back(kernel_shape.dimensions(d));
}
int64_t kernel_channels =
kernel_shape.dimensions(dnums.kernel_input_feature_dimension());
int64_t kernel_filters =
kernel_shape.dimensions(dnums.kernel_output_feature_dimension());
const Shape& convolution_shape = convolution->shape();
for (int d : dnums.output_spatial_dimensions()) {
output_dims.push_back(convolution_shape.dimensions(d));
}
const Window& window = convolution->window();
absl::InlinedVector<int64_t, 2> strides;
absl::InlinedVector<std::pair<int64_t, int64_t>, 2> padding;
absl::InlinedVector<int64_t, 2> base_dilation;
absl::InlinedVector<int64_t, 2> window_dilation;
if (one_dim_convolution) {
strides.push_back(1);
padding.push_back({0, 0});
base_dilation.push_back(1);
window_dilation.push_back(1);
}
for (const auto& d : window.dimensions()) {
strides.push_back(d.stride());
padding.push_back({d.padding_low(), d.padding_high()});
base_dilation.push_back(d.base_dilation());
window_dilation.push_back(d.window_dilation());
}
PrimitiveType primitive_type = lhs->shape().element_type();
bool multi_threaded =
hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
bool use_mkl_dnn =
hlo_module_config_.debug_options().xla_cpu_use_mkl_dnn() &&
convolution->feature_group_count() == 1;
bool use_acl = hlo_module_config_.debug_options().xla_cpu_use_acl();
auto valid_num_dims = [](absl::Span<const int64_t> xs) {
return xs.size() >= 2 && xs.size() <= 3;
};
TF_RET_CHECK(valid_num_dims(input_dims)) << input_dims.size();
TF_RET_CHECK(valid_num_dims(kernel_dims));
TF_RET_CHECK(valid_num_dims(output_dims));
TF_RET_CHECK(valid_num_dims(strides));
TF_RET_CHECK(padding.size() >= 2 && padding.size() <= 3);
TF_RET_CHECK(valid_num_dims(base_dilation));
TF_RET_CHECK(valid_num_dims(window_dilation));
const char* fn_name;
if (input_dims.size() == 2) {
fn_name =
primitive_type == F16
? (multi_threaded
? runtime::kEigenConv2DF16SymbolName
: runtime::kEigenSingleThreadedConv2DF16SymbolName)
: (multi_threaded
? (use_mkl_dnn
? runtime::kMKLConv2DF32SymbolName
: (use_acl ? runtime::kACLConv2DF32SymbolName
: runtime::kEigenConv2DF32SymbolName))
: runtime::kEigenSingleThreadedConv2DF32SymbolName);
} else if (input_dims.size() == 3) {
fn_name =
primitive_type == F16
? (multi_threaded
? runtime::kEigenConv3DF16SymbolName
: runtime::kEigenSingleThreadedConv3DF16SymbolName)
: (multi_threaded
? runtime::kEigenConv3DF32SymbolName
: runtime::kEigenSingleThreadedConv3DF32SymbolName);
} else {
LOG(FATAL) << "Invalid number of dimensions " << input_dims.size();
}
if (!multi_threaded && use_mkl_dnn) {
LOG(WARNING) << "Using Eigen instead of MKL-DNN for single-threaded "
"convolution.";
}
std::vector<llvm::Value*> args = {
GetExecutableRunOptionsArgument(),
GetEmittedValueFor(convolution),
lhs_address,
rhs_address,
b()->getInt64(input_batch),
};
for (int64_t d : input_dims) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(input_channels));
for (int64_t d : kernel_dims) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(kernel_channels));
args.push_back(b()->getInt64(kernel_filters));
for (int64_t d : output_dims) {
args.push_back(b()->getInt64(d));
}
for (int64_t d : strides) {
args.push_back(b()->getInt64(d));
}
for (const auto& p : padding) {
args.push_back(b()->getInt64(p.first));
args.push_back(b()->getInt64(p.second));
}
for (int64_t d : base_dilation) {
args.push_back(b()->getInt64(d));
}
for (int64_t d : window_dilation) {
args.push_back(b()->getInt64(d));
}
args.push_back(b()->getInt64(convolution->feature_group_count()));
VLOG(1) << "Ir emitter emitted Convolution to runtime:" << fn_name;
      EmitCallToFunc(fn_name, args, b()->getVoidTy(),
                     /*does_not_throw=*/true,
                     /*only_accesses_arg_memory=*/true);
return absl::OkStatus();
}
}
return DefaultAction(convolution);
}
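// FFTs are lowered to the DUCC FFT runtime. The operand shape is flattened
// into a leading batch dimension followed by the fft_rank transform
// dimensions.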
absl::Status IrEmitter::HandleFft(HloInstruction* fft) {
auto operand = fft->operand(0);
TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
*fft, {operand},
{F32, F64, C64, C128}));
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(operand->shape().layout()));
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(fft->shape().layout()));
VLOG(3) << "operand=" << ShapeUtil::HumanStringWithLayout(operand->shape());
VLOG(3) << "fft=" << ShapeUtil::HumanStringWithLayout(fft->shape());
llvm::Value* operand_address = GetEmittedValueFor(operand);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fft));
const std::vector<int64_t>& fft_length = fft->fft_length();
const int fft_rank = fft_length.size();
absl::InlinedVector<int64_t, 4> operand_shape_flat(fft_rank + 1);
int64_t input_batch = 1;
int64_t input_batch_length = fft->shape().dimensions_size() - fft_rank;
for (int i = 0; i < input_batch_length; i++) {
input_batch *= operand->shape().dimensions(i);
}
operand_shape_flat[0] = input_batch;
for (int i = 0; i < fft_rank; ++i) {
operand_shape_flat[i + 1] =
operand->shape().dimensions(i + input_batch_length);
}
bool multi_threaded_eigen =
hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
const char* fn_name = multi_threaded_eigen
? runtime::kDuccFftSymbolName
: runtime::kDuccSingleThreadedFftSymbolName;
auto* fft_lengths =
EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(fft_length));
auto* input_shape =
EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(operand_shape_flat));
EmitCallToFunc(fn_name,
{GetExecutableRunOptionsArgument(), GetEmittedValueFor(fft),
operand_address, b()->getInt32(fft->fft_type()),
b()->getInt32(operand->shape().element_type() == F64 ||
operand->shape().element_type() == C128),
b()->getInt32(fft_rank), input_shape, fft_lengths},
                 b()->getVoidTy(), /*does_not_throw=*/true,
                 /*only_accesses_arg_memory=*/false,
                 /*only_accesses_inaccessible_mem_or_arg_mem=*/true);
return absl::OkStatus();
}
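// With a single replica, all-reduce is the identity: each output buffer is a
// plain copy of the corresponding operand.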
absl::Status IrEmitter::HandleAllReduceSingleReplica(HloInstruction* crs) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
if (crs->operand_count() == 1) {
return EmitMemcpy(*crs->operand(0), *crs);
}
std::vector<llvm::Value*> operand_ptrs;
for (int64_t i = 0; i < crs->operand_count(); ++i) {
llvm::Value* in_ptr = GetEmittedValueFor(crs->operand(i));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(crs, {i}));
const Shape& operand_shape = crs->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-reduce must be arrays: " << crs->ToString();
operand_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
MemCpy(operand_ptrs.back(), llvm::Align(1), in_ptr,
llvm::Align(1), ShapeUtil::ByteSizeOf(operand_shape));
}
llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, b());
return absl::OkStatus();
}
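// Element types accepted by the AllReduce/ReduceScatter runtime entry points.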
static bool DataTypeIsSupportedByReduceScatter(PrimitiveType datatype) {
switch (datatype) {
case PRED:
case S8:
case U8:
case S16:
case U16:
case S32:
case U32:
case S64:
case U64:
case F16:
case F32:
case F64:
case C64:
case C128:
return true;
default:
return false;
}
}
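// Emits a call to the AllReduce runtime, passing the serialized replica
// groups, the reduction kind matched from to_apply(), and the (possibly
// tuple-typed) input/output buffer arrays.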
absl::Status IrEmitter::HandleAllReduceMultipleReplica(HloInstruction* crs) {
CHECK_GE(crs->operand_count(), 1);
PrimitiveType datatype = crs->operand(0)->shape().element_type();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
if (!DataTypeIsSupportedByReduceScatter(datatype)) {
return Unimplemented("AllReduce for datatype '%s' is not supported",
primitive_util::LowercasePrimitiveTypeName(datatype));
}
if (!MatchReductionComputation(crs->to_apply()).has_value()) {
return Unimplemented("AllReduce for computation '%s' is not supported",
crs->to_apply()->ToString());
}
std::string replica_groups = ReplicaGroupsToString(crs->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
bool is_tuple = crs->operand_count() > 1;
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
if (is_tuple) {
CHECK(crs->shape().IsTuple());
for (int64_t i = 0; i < crs->operand_count(); i++) {
const HloInstruction* op = crs->operand(i);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(crs, {i}));
const Shape& operand_shape = crs->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-reduce must be arrays: " << crs->ToString();
output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
input_buffer_ptrs.push_back(GetEmittedValueFor(op));
}
} else {
Shape shape = crs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(crs->operand(0), {}));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(crs, {}));
input_buffer_ptrs.push_back(EmitBufferPointer(input_slice, shape));
output_buffer_ptrs.push_back(EmitBufferPointer(output_slice, shape));
}
llvm::Value* input_buffers =
EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
llvm::Value* output_buffers =
EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
int32_t shape_length;
TF_ASSIGN_OR_RETURN(llvm::Value * shape_ptr,
llvm_ir::EncodeSelfDescribingShapeConstant(
crs->shape(), &shape_length, b()));
bool use_global_device_ids =
Cast<HloAllReduceInstruction>(crs)->use_global_device_ids();
EmitCallToFunc(
runtime::kAllReduceSymbolName,
{GetExecutableRunOptionsArgument(),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(crs->channel_id().has_value()
? *crs->channel_id()
: crs->GetModule()->unique_id()),
b()->getInt32(
static_cast<int32_t>(*MatchReductionComputation(crs->to_apply()))),
shape_ptr,
b()->getInt32(shape_length),
b()->getInt32(crs->operand_count()),
input_buffers,
output_buffers},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleAllReduce(HloInstruction* crs) {
if (hlo_module_config_.replica_count() == 1 &&
hlo_module_config_.num_partitions() == 1) {
return HandleAllReduceSingleReplica(crs);
}
return HandleAllReduceMultipleReplica(crs);
}
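// Like the multi-replica all-reduce, but targeting the single-operand
// ReduceScatter runtime entry point.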
absl::Status IrEmitter::HandleReduceScatter(HloInstruction* rs) {
CHECK_EQ(rs->operand_count(), 1);
PrimitiveType datatype = rs->operand(0)->shape().element_type();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rs));
if (!DataTypeIsSupportedByReduceScatter(datatype)) {
return Unimplemented("ReduceScatter for datatype '%s' is not supported",
primitive_util::LowercasePrimitiveTypeName(datatype));
}
if (!MatchReductionComputation(rs->to_apply()).has_value()) {
return Unimplemented("ReduceScatter for computation '%s' is not supported",
rs->to_apply()->ToString());
}
std::string replica_groups = ReplicaGroupsToString(rs->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
Shape shape = rs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(rs->operand(0), {}));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(rs, {}));
llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);
bool use_global_device_ids =
Cast<HloReduceScatterInstruction>(rs)->use_global_device_ids();
EmitCallToFunc(
runtime::kReduceScatterSymbolName,
{GetExecutableRunOptionsArgument(),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(static_cast<int32_t>(rs->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(rs->channel_id().has_value()
? *rs->channel_id()
: rs->GetModule()->unique_id()),
b()->getInt32(
static_cast<int32_t>(*MatchReductionComputation(rs->to_apply()))),
b()->getInt32(static_cast<int32_t>(datatype)),
b()->getInt64(ShapeUtil::ElementsIn(rs->shape())),
input_buffer,
output_buffer},
b()->getVoidTy());
return absl::OkStatus();
}
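// Only the tuple form of AllToAll (no split dimension) is supported; every
// per-operand buffer must have the same byte size.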
absl::Status IrEmitter::HandleAllToAll(HloInstruction* instruction) {
auto* instr = Cast<HloAllToAllInstruction>(instruction);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
CHECK(!instr->split_dimension() && instr->shape().IsTuple())
<< "Only tuple AllToAll is supported";
std::string replica_groups =
ReplicaGroupsToString(instruction->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
int64_t buffer_size = -1;
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
for (int64_t i = 0; i < instruction->operand_count(); i++) {
const HloInstruction* op = instruction->operand(i);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(instruction, {i}));
const Shape& operand_shape = instruction->operand(i)->shape();
CHECK(operand_shape.IsArray())
<< "Operands to all-to-all must be arrays: " << instruction->ToString();
output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
input_buffer_ptrs.push_back(GetEmittedValueFor(op));
CHECK(buffer_size == -1 || buffer_size == out_slice.size());
buffer_size = out_slice.size();
}
llvm::Value* input_buffers =
EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
llvm::Value* output_buffers =
EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
EmitCallToFunc(
runtime::kAllToAllSymbolName,
{
GetExecutableRunOptionsArgument(),
b()->getInt32(
static_cast<int32_t>(instruction->channel_id().has_value())),
b()->getInt64(instruction->channel_id().has_value()
? *instruction->channel_id()
: instruction->GetModule()->unique_id()),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt32(instruction->operand_count()),
b()->getInt64(buffer_size),
input_buffers,
output_buffers,
},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
return absl::OkStatus();
}
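// Emits a call to the AllGather runtime on the single operand's input and
// output buffers.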
absl::Status IrEmitter::HandleAllGather(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
std::string replica_groups =
ReplicaGroupsToString(instruction->replica_groups());
int32_t replica_groups_size = replica_groups.size();
llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
std::vector<llvm::Value*> input_buffer_ptrs;
std::vector<llvm::Value*> output_buffer_ptrs;
const HloInstruction* op = instruction->operand(0);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice in_slice,
assignment_.GetUniqueSlice(op, {}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
assignment_.GetUniqueSlice(instruction, {}));
const Shape& operand_shape = op->shape();
CHECK(op->shape().IsArray())
<< "Operand to all-gather must be arrays: " << instruction->ToString();
llvm::Value* output_buffer = EmitBufferPointer(out_slice, operand_shape);
llvm::Value* input_buffer = GetEmittedValueFor(op);
int64_t buffer_size = in_slice.size();
bool use_global_device_ids =
Cast<HloAllGatherInstruction>(instruction)->use_global_device_ids();
EmitCallToFunc(
runtime::kAllGatherSymbolName,
{
GetExecutableRunOptionsArgument(),
b()->getInt32(
static_cast<int32_t>(instruction->channel_id().has_value())),
b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
b()->getInt64(instruction->channel_id().has_value()
? *instruction->channel_id()
: instruction->GetModule()->unique_id()),
replica_groups_v,
b()->getInt32(replica_groups_size),
b()->getInt64(buffer_size),
input_buffer,
output_buffer,
},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
return absl::OkStatus();
}
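// CollectivePermute passes the serialized source=target pairs to the runtime
// along with the input and output buffers.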
absl::Status IrEmitter::HandleCollectivePermute(HloInstruction* crs) {
auto* instr = Cast<HloCollectivePermuteInstruction>(crs);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instr));
std::string source_target_pairs = absl::StrJoin(
instr->source_target_pairs(), ",", absl::PairFormatter("="));
llvm::Value* source_target_pairs_v =
b()->CreateGlobalStringPtr(source_target_pairs);
Shape shape = crs->operand(0)->shape();
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
assignment_.GetUniqueSlice(crs->operand(0), {}));
llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(crs, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);
EmitCallToFunc(
runtime::kCollectivePermuteSymbolName,
{GetExecutableRunOptionsArgument(),
b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
b()->getInt64(crs->channel_id().has_value()
? *crs->channel_id()
: crs->GetModule()->unique_id()),
b()->getInt32(ShapeUtil::ByteSizeOf(shape)),
input_buffer,
output_buffer,
source_target_pairs_v,
b()->getInt32(source_target_pairs.size())},
b()->getVoidTy());
return absl::OkStatus();
}
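// PartitionId and ReplicaId ask the runtime to write the respective id into
// the output buffer.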
absl::Status IrEmitter::HandlePartitionId(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(hlo, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
EmitCallToFunc(runtime::kPartitionIdSymbolName,
{GetExecutableRunOptionsArgument(),
output_buffer},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleReplicaId(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
assignment_.GetUniqueSlice(hlo, {}));
llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
EmitCallToFunc(runtime::kReplicaIdSymbolName,
{GetExecutableRunOptionsArgument(),
output_buffer},
b()->getVoidTy());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleParameter(HloInstruction* parameter) {
VLOG(2) << "HandleParameter: " << parameter->ToString();
return EmitTargetAddressForOp(parameter);
}
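// Returns true if deleting the reduced dimensions from the operand layout
// yields exactly the result layout, i.e. the reduction preserves the
// relative physical order of the unreduced dimensions.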
static bool ReductionPreservesLayout(const HloInstruction& reduce) {
DCHECK_EQ(reduce.opcode(), HloOpcode::kReduce);
absl::flat_hash_map<int64_t, int64_t> unreduced_dim_map;
absl::flat_hash_set<int64_t> reduced_dims(reduce.dimensions().begin(),
reduce.dimensions().end());
const Shape& operand_shape = reduce.operand(0)->shape();
const Shape& result_shape = reduce.shape();
int64_t delta = 0;
for (int64_t i = 0; i < operand_shape.dimensions_size(); i++) {
if (reduced_dims.contains(i)) {
delta++;
} else {
InsertOrDie(&unreduced_dim_map, i, i - delta);
}
}
int64_t result_dim_idx = 0;
for (int64_t operand_dim_idx = 0;
operand_dim_idx < operand_shape.dimensions_size(); operand_dim_idx++) {
int64_t operand_dim =
operand_shape.layout().minor_to_major(operand_dim_idx);
if (!reduced_dims.contains(operand_dim)) {
if (FindOrDie(unreduced_dim_map, operand_dim) !=
result_shape.layout().minor_to_major(result_dim_idx++)) {
return false;
}
}
}
CHECK_EQ(result_dim_idx, result_shape.dimensions_size());
return true;
}
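// Pattern-matches the reduction computation to a simple binary operation on
// its two parameters and returns a generator that emits it, or nullptr (with
// *failure_reason set) when the computation cannot be vectorized this way.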
IrEmitter::ReductionGenerator IrEmitter::MatchReductionGenerator(
HloComputation* function, std::string* failure_reason) const {
CHECK_EQ(function->num_parameters(), 2);
auto root_instruction = function->root_instruction();
CHECK(ShapeUtil::IsScalar(root_instruction->shape()));
if (root_instruction->operand_count() != 2) {
*failure_reason = "root instruction is not a binary operation";
return nullptr;
}
const Shape& root_shape = root_instruction->shape();
if (ShapeUtil::ElementIsComplex(root_shape)) {
*failure_reason = "complex values not supported";
return nullptr;
}
bool root_is_floating_point = ShapeUtil::ElementIsFloating(root_shape);
bool root_is_integral = ShapeUtil::ElementIsIntegral(root_shape);
bool root_is_signed = ShapeUtil::ElementIsSigned(root_shape);
auto lhs = root_instruction->operand(0);
auto rhs = root_instruction->operand(1);
auto param_0 = function->parameter_instruction(0);
auto param_1 = function->parameter_instruction(1);
if (!(lhs == param_0 && rhs == param_1) &&
!(rhs == param_0 && lhs == param_1)) {
*failure_reason =
"root instruction is not a binary operation on the incoming arguments";
return nullptr;
}
CHECK(ShapeUtil::IsScalar(lhs->shape()) && ShapeUtil::IsScalar(rhs->shape()));
switch (root_instruction->opcode()) {
default:
*failure_reason = "did not recognize root instruction opcode";
return nullptr;
case HloOpcode::kAdd:
return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
return root_is_integral ? b->CreateAdd(lhs, rhs)
: b->CreateFAdd(lhs, rhs);
};
case HloOpcode::kMultiply:
return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
return root_is_integral ? b->CreateMul(lhs, rhs)
: b->CreateFMul(lhs, rhs);
};
case HloOpcode::kAnd:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateAnd(lhs, rhs);
};
case HloOpcode::kOr:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateOr(lhs, rhs);
};
case HloOpcode::kXor:
return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
return b->CreateXor(lhs, rhs);
};
case HloOpcode::kMaximum:
return [root_is_floating_point, root_is_signed, this](
llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) -> llvm::Value* {
if (root_is_floating_point) {
return llvm_ir::EmitFloatMax(
lhs, rhs, b,
hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
}
return b->CreateSelect(
b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE
: llvm::ICmpInst::ICMP_UGE,
lhs, rhs),
lhs, rhs);
};
case HloOpcode::kMinimum:
return [root_is_floating_point, root_is_signed, this](
llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) -> llvm::Value* {
if (root_is_floating_point) {
return llvm_ir::EmitFloatMin(
lhs, rhs, b,
hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
}
return b->CreateSelect(
b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SLE
: llvm::ICmpInst::ICMP_ULE,
lhs, rhs),
lhs, rhs);
};
}
}
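// Decomposes `element_count` into power-of-two fragments: full vector
// registers where possible, then smaller vectors or scalars for the
// remainder bits.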
IrEmitter::ShardedVectorType IrEmitter::CreateShardedVectorType(
PrimitiveType element_type, unsigned element_count) {
int vector_register_size_in_elements =
target_machine_features_.vector_register_byte_size(
*compute_function()->function()) /
ShapeUtil::ByteSizeOfPrimitiveType(element_type);
ShardedVectorType sharded_vector_type;
llvm::Type* element_ir_type =
llvm_ir::PrimitiveTypeToIrType(element_type, module_);
for (int i = 0, e = 1 + Log2Ceiling(element_count); i < e; i++) {
const unsigned current_size_fragment = 1u << i;
if (!(element_count & current_size_fragment)) {
continue;
}
if (current_size_fragment == 1) {
sharded_vector_type.push_back(element_ir_type);
continue;
}
if (current_size_fragment >= vector_register_size_in_elements) {
auto vector_type = llvm::VectorType::get(
element_ir_type, vector_register_size_in_elements, false);
sharded_vector_type.insert(
sharded_vector_type.end(),
current_size_fragment / vector_register_size_in_elements,
vector_type);
CHECK_EQ(current_size_fragment % vector_register_size_in_elements, 0);
continue;
}
sharded_vector_type.push_back(
llvm::VectorType::get(element_ir_type, current_size_fragment, false));
}
return sharded_vector_type;
}
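// Emits the inner reduction loop: every accumulator shard is seeded with the
// (splatted) init value, updated across the reduced dimensions, and the
// final shard values are returned as SSA values.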
absl::StatusOr<IrEmitter::ShardedVector>
IrEmitter::EmitInnerLoopForVectorizedReduction(
const ReductionGenerator& reduction_generator,
const llvm_ir::IrArray::Index& output_index,
const ShardedVectorType& accumulator_type, HloInstruction* init_value,
HloInstruction* arg, absl::Span<const int64_t> dimensions,
llvm::Align element_alignment) {
ShardedVector accumulator;
accumulator.reserve(accumulator_type.size());
for (auto accumulator_shard_type : accumulator_type) {
    accumulator.push_back(llvm_ir::EmitAllocaAtFunctionEntry(
        accumulator_shard_type, "accumulator", b(), /*alignment=*/0));
}
llvm::Value* init_value_ssa =
Load(IrShapeType(init_value->shape()), GetEmittedValueFor(init_value));
for (llvm::Value* accumulator_shard : accumulator) {
llvm::Value* initial_value;
auto shard_type =
llvm::cast<llvm::AllocaInst>(accumulator_shard)->getAllocatedType();
if (auto vector_type = llvm::dyn_cast<llvm::VectorType>(shard_type)) {
initial_value =
VectorSplat(vector_type->getElementCount(), init_value_ssa);
} else {
initial_value = init_value_ssa;
}
AlignedStore(initial_value, accumulator_shard, element_alignment);
}
llvm_ir::ForLoopNest reduction_loop_nest(IrName(arg, "vectorized_inner"),
b());
std::vector<llvm::Value*> input_multi_index =
reduction_loop_nest.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
"reduction_dim");
SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(), b());
llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
llvm_ir::IrArray::Index::const_iterator it = output_index.begin();
for (auto& i : input_multi_index) {
if (i == nullptr) {
i = *it++;
}
}
CHECK(output_index.end() == it);
llvm_ir::IrArray::Index input_index(input_multi_index, arg->shape(),
b()->getInt64Ty());
llvm::Value* input_address =
arg_array.EmitArrayElementAddress(input_index, b());
for (int i = 0; i < accumulator.size(); i++) {
auto alloca = llvm::cast<llvm::AllocaInst>(accumulator[i]);
auto current_accumulator_value = AlignedLoad(
alloca->getAllocatedType(), accumulator[i], element_alignment);
auto addend = AlignedLoad(alloca->getAllocatedType(), input_address,
element_alignment);
arg_array.AnnotateLoadStoreInstructionWithMetadata(addend);
auto reduced_result =
reduction_generator(b(), current_accumulator_value, addend);
AlignedStore(reduced_result, accumulator[i], element_alignment);
if (i != (accumulator.size() - 1)) {
input_address =
ConstInBoundsGEP1_32(reduced_result->getType(), input_address, 1);
}
}
SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(), b());
ShardedVector result_ssa;
result_ssa.reserve(accumulator.size());
for (auto accumulator_shard : accumulator) {
auto alloca = llvm::cast<llvm::AllocaInst>(accumulator_shard);
result_ssa.push_back(AlignedLoad(alloca->getAllocatedType(),
accumulator_shard, element_alignment));
}
return result_ssa;
}
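// Stores the shards of a sharded vector at consecutive offsets from
// `store_address`, annotating each store with the array's aliasing metadata.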
void IrEmitter::EmitShardedVectorStore(
llvm::Value* store_address, const std::vector<llvm::Value*>& value_to_store,
llvm::Align alignment, const llvm_ir::IrArray& containing_array) {
for (int i = 0; i < value_to_store.size(); i++) {
auto store_instruction =
AlignedStore(value_to_store[i], store_address, alignment);
containing_array.AnnotateLoadStoreInstructionWithMetadata(
store_instruction);
if (i != (value_to_store.size() - 1)) {
store_address =
ConstInBoundsGEP1_32(value_to_store[i]->getType(), store_address, 1);
}
}
}
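// Attempts a vectorized lowering of `reduce`: a vector-width main loop over
// the innermost dimension plus a narrower epilogue for the remainder.
// Returns false (with *failure_reason set) if vectorization is not possible.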
absl::StatusOr<bool> IrEmitter::EmitVectorizedReduce(
HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloComputation* function,
std::string* failure_reason) {
if (!reduce->shape().IsArray()) {
*failure_reason = "vectorization of variadic reduce not implemented";
return false;
}
if (!ReductionPreservesLayout(*reduce)) {
return false;
}
ReductionGenerator reduction_generator =
MatchReductionGenerator(function, failure_reason);
if (!reduction_generator) {
return false;
}
int vector_register_size_in_elements =
target_machine_features_.vector_register_byte_size(
*compute_function()->function()) /
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());
if (vector_register_size_in_elements == 0) {
return false;
}
int vectorization_factor_in_bytes =
target_machine_features_.vectorization_factor_in_bytes();
const int vectorization_factor =
vectorization_factor_in_bytes /
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());
bool is_reduction_over_minor_dimension = absl::c_linear_search(
dimensions, LayoutUtil::Minor(arg->shape().layout(), 0));
llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type()),
MinimumAlignmentForPrimitiveType(reduce->shape().element_type())));
if (is_reduction_over_minor_dimension) {
*failure_reason = "reduction over minor dimension not implemented";
return false;
}
CHECK(!reduce->shape().IsTuple());
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(reduce));
llvm_ir::ForLoopNest loop_nest(IrName(reduce), b());
std::vector<llvm::Value*> array_multi_index(
reduce->shape().dimensions_size());
for (int i = LayoutUtil::MinorToMajor(reduce->shape()).size() - 1; i > 0;
--i) {
int64_t dimension = LayoutUtil::Minor(reduce->shape().layout(), i);
int64_t start_index = 0;
int64_t end_index = reduce->shape().dimensions(dimension);
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
start_index, end_index, absl::StrFormat("dim.%d", dimension));
array_multi_index[dimension] = loop->GetIndVarValue();
}
int64_t innermost_dimension = LayoutUtil::Minor(reduce->shape().layout(), 0);
int64_t innermost_dimension_size =
reduce->shape().dimensions(innermost_dimension);
if (llvm::BasicBlock* innermost_body_bb =
loop_nest.GetInnerLoopBodyBasicBlock()) {
SetToFirstInsertPoint(innermost_body_bb, b());
}
auto outermost_loop_exit_block = loop_nest.GetOuterLoopExitBasicBlock();
if (innermost_dimension_size >= vectorization_factor) {
int64_t start_index = 0;
int64_t end_index = (innermost_dimension_size / vectorization_factor) *
vectorization_factor;
std::unique_ptr<llvm_ir::ForLoop> loop =
loop_nest.AddLoop(start_index, end_index, vectorization_factor,
absl::StrFormat("dim.%d", innermost_dimension));
array_multi_index[innermost_dimension] = loop->GetIndVarValue();
SetToFirstInsertPoint(loop->GetBodyBasicBlock(), b());
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(), vectorization_factor);
llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
b()->getInt64Ty());
TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
EmitInnerLoopForVectorizedReduction(
reduction_generator, array_index, vector_type,
init_value, arg, dimensions, element_alignment));
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
target_array.EmitArrayElementAddress(array_index, b());
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
if (auto exit_terminator = loop->GetExitBasicBlock()->getTerminator()) {
CHECK_GT(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
b()->SetInsertPoint(exit_terminator);
} else {
CHECK_EQ(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
b()->SetInsertPoint(loop->GetExitBasicBlock());
}
}
if (innermost_dimension_size % vectorization_factor) {
array_multi_index[innermost_dimension] =
b()->getInt64(innermost_dimension_size -
(innermost_dimension_size % vectorization_factor));
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(),
innermost_dimension_size % vectorization_factor);
llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
b()->getInt64Ty());
llvm::IRBuilderBase::FastMathFlagGuard guard(*b());
llvm::FastMathFlags flags = b()->getFastMathFlags();
flags.setAllowReassoc(true);
b()->setFastMathFlags(flags);
TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
EmitInnerLoopForVectorizedReduction(
reduction_generator, array_index, vector_type,
init_value, arg, dimensions, element_alignment));
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
target_array.EmitArrayElementAddress(array_index, b());
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
}
if (outermost_loop_exit_block) {
b()->SetInsertPoint(outermost_loop_exit_block);
}
return true;
}
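// Tries the vectorized reduce emitter first, with reassociation allowed for
// the reduction computation, and falls back to DefaultAction on failure.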
absl::Status IrEmitter::HandleReduce(HloInstruction* reduce) {
auto arg = reduce->mutable_operand(0);
auto init_value = reduce->mutable_operand(1);
absl::Span<const int64_t> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
bool saved_allow_reassociation = allow_reassociation_;
allow_reassociation_ = true;
auto cleanup = absl::MakeCleanup([saved_allow_reassociation, this]() {
allow_reassociation_ = saved_allow_reassociation;
});
if (!options::VectorizedReduceDisabled(hlo_module_config_)) {
std::string vectorization_failure_reason;
TF_ASSIGN_OR_RETURN(
bool vectorization_successful,
EmitVectorizedReduce(reduce, arg, init_value, dimensions, function,
&vectorization_failure_reason));
if (vectorization_successful) {
VLOG(1) << "Successfully vectorized reduction " << reduce->ToString()
<< "\n";
return absl::OkStatus();
} else {
VLOG(1) << "Could not vectorize reduction " << reduce->ToString() << ": "
<< vectorization_failure_reason;
}
}
return DefaultAction(reduce);
}
absl::Status IrEmitter::HandleSend(HloInstruction* send) {
return Unimplemented("Send is not implemented on CPU.");
}
absl::Status IrEmitter::HandleSendDone(HloInstruction* send_done) {
return Unimplemented("Send-done is not implemented on CPU.");
}
absl::Status IrEmitter::HandleScatter(HloInstruction*) {
return Unimplemented("Scatter is not implemented on CPUs.");
}
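// Emits slice as a loop nest of memcpys: the contiguous inner dimensions are
// copied with a single transfer per iteration of the outer loops.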
absl::Status IrEmitter::HandleSlice(HloInstruction* slice) {
VLOG(2) << "HandleSlice: " << slice->ToString();
auto operand = slice->operand(0);
if (ShouldEmitParallelLoopFor(*slice)) {
return DefaultAction(slice);
}
if (!LayoutUtil::Equal(operand->shape().layout(), slice->shape().layout())) {
return DefaultAction(slice);
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(slice));
if (ShapeUtil::IsZeroElementArray(slice->shape())) {
return absl::OkStatus();
}
const Layout& layout = operand->shape().layout();
const int64_t num_dims = operand->shape().dimensions_size();
absl::flat_hash_set<int64_t> inner_dims;
for (int64_t dim : LayoutUtil::MinorToMajor(layout)) {
if (operand->shape().dimensions(dim) != slice->shape().dimensions(dim)) {
break;
}
inner_dims.insert(dim);
}
const bool is_trivial_copy = (inner_dims.size() == num_dims);
if (is_trivial_copy) {
if (ShapeUtil::IsEffectiveScalar(slice->shape())) {
return DefaultAction(slice);
} else {
return EmitMemcpy(*slice, *operand);
}
}
const Shape logical_element_shape = ShapeUtil::FilterDimensions(
[&inner_dims](int64_t dim) { return inner_dims.contains(dim); },
operand->shape());
const int64_t primitive_elements_per_logical_element =
ShapeUtil::ElementsIn(logical_element_shape);
const int64_t memcpy_dim = LayoutUtil::Minor(layout, inner_dims.size());
const bool memcpy_is_contiguous = slice->slice_strides(memcpy_dim) == 1;
const int64_t memcpy_logical_elements =
memcpy_is_contiguous
? slice->slice_limits(memcpy_dim) - slice->slice_starts(memcpy_dim)
: 1;
llvm::SmallVector<int64_t> outer_dims;
for (int64_t i = 0; i < num_dims - inner_dims.size() - 1; ++i) {
outer_dims.push_back(LayoutUtil::Major(layout, i));
}
if (!memcpy_is_contiguous) {
outer_dims.push_back(memcpy_dim);
}
llvm_ir::IrArray target_array = GetIrArrayFor(slice);
const int64_t num_outer_loops = outer_dims.size();
llvm_ir::ForLoopNest loops(IrName(slice), b());
std::vector<llvm::Value*> target_multi_index =
loops.AddLoopsForShapeOnDimensions(slice->shape(), outer_dims, "slice");
std::replace(target_multi_index.begin(), target_multi_index.end(),
static_cast<llvm::Value*>(nullptr),
static_cast<llvm::Value*>(b()->getInt64(0)));
llvm_ir::IrArray::Index target_index(target_multi_index, slice->shape(),
b()->getInt64Ty());
if (num_outer_loops > 0) {
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
}
llvm_ir::IrArray source_array = GetIrArrayFor(operand);
const llvm_ir::IrArray::Index source_index = target_index.SourceIndexOfSlice(
operand->shape(), slice->slice_starts(),
slice->slice_strides(), b());
llvm::Value* memcpy_dest =
target_array.EmitArrayElementAddress(target_index, b(), "slice.dest");
llvm::Value* memcpy_source =
source_array.EmitArrayElementAddress(source_index, b(), "slice.source");
const int64_t memcpy_elements =
primitive_elements_per_logical_element * memcpy_logical_elements;
EmitTransferElements(memcpy_dest, memcpy_source, memcpy_elements,
slice->shape().element_type(), target_array,
source_array);
if (VLOG_IS_ON(2)) {
const int64_t memcpy_bytes =
ShapeUtil::ByteSizeOf(logical_element_shape) * memcpy_elements;
VLOG(2) << " emitted copy of " << memcpy_bytes << " bytes inside "
<< num_outer_loops << " loops";
}
if (num_outer_loops > 0) {
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleDynamicSlice(HloInstruction* dynamic_slice) {
if (ShapeUtil::IsScalar(dynamic_slice->shape())) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_slice));
return EmitMemcpy(*dynamic_slice->operand(0), *dynamic_slice);
}
return DefaultAction(dynamic_slice);
}
absl::Status IrEmitter::HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
auto update = dynamic_update_slice->operand(1);
if (ShapeUtil::IsScalar(dynamic_update_slice->shape())) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
return EmitMemcpy(*update, *dynamic_update_slice);
} else if (llvm_ir::CanUpdateDynamicSliceInPlace(dynamic_update_slice,
assignment_)) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
auto operands = GetIrArraysForOperandsOf(dynamic_update_slice);
return llvm_ir::EmitDynamicUpdateSliceInPlace(
operands, GetIrArrayFor(dynamic_update_slice),
IrName(dynamic_update_slice, "in_place"), b());
}
return DefaultAction(dynamic_update_slice);
}
absl::Status IrEmitter::HandleRecv(HloInstruction* recv) {
return Unimplemented("Recv is not implemented on CPU.");
}
absl::Status IrEmitter::HandleRecvDone(HloInstruction* recv_done) {
return Unimplemented("Recv-done is not implemented on CPU.");
}
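// Pad is emitted in two phases: fill the whole output with the padding
// value, then write the operand elements at their offset positions.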
absl::Status IrEmitter::HandlePad(HloInstruction* pad) {
CHECK_EQ(pad->operand_count(), 2);
const auto operand = pad->operand(0);
const auto padding_value = pad->operand(1);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(pad));
return HandlePad(pad, GetIrArrayFor(operand), GetIrArrayFor(padding_value),
GetIrArrayFor(pad));
}
absl::Status IrEmitter::HandlePad(HloInstruction* pad,
const llvm_ir::IrArray& operand_array,
const llvm_ir::IrArray& padding_value_array,
const llvm_ir::IrArray& output_array) {
CHECK_EQ(pad->operand_count(), 2);
for (auto& padding_dimension : pad->padding_config().dimensions()) {
if (padding_dimension.edge_padding_low() < 0 ||
padding_dimension.edge_padding_high() < 0) {
return InternalStrCat(
"Encountered negative padding in IrEmitter on CPU. "
"This should have been eliminated at the HLO level. ",
pad->ToString());
}
}
const HloInstruction* padding_value = pad->operand(1);
const auto index_type = b()->getInt64Ty();
const auto index = llvm_ir::IrArray::Index(index_type);
  llvm::Value* padding_value_addr = padding_value_array.EmitArrayElementAddress(
      index, b(), "padding_value_addr", /*use_linear_index=*/true,
      /*bit_offset=*/nullptr);
const llvm_ir::ElementGenerator element_generator =
[this, padding_value,
padding_value_addr](const llvm_ir::IrArray::Index& target_index) {
return b()->CreateLoad(IrShapeType(padding_value->shape()),
padding_value_addr);
};
TF_RETURN_IF_ERROR(EmitTargetElementLoop(
pad, "initialize", element_generator,
std::optional<const llvm_ir::IrArray>(output_array)));
llvm_ir::ForLoopNest loops(IrName(pad, "assign"), b());
const HloInstruction* operand = pad->operand(0);
const llvm_ir::IrArray::Index operand_index =
loops.AddLoopsForShape(operand->shape(), "operand");
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
llvm::Value* operand_data =
operand_array.EmitReadArrayElement(operand_index, b());
const PaddingConfig& padding_config = pad->padding_config();
std::vector<llvm::Value*> output_multi_index;
for (size_t i = 0; i < operand_index.size(); ++i) {
llvm::Value* offset =
Mul(operand_index[i],
b()->getInt64(padding_config.dimensions(i).interior_padding() + 1));
llvm::Value* index = Add(
offset, b()->getInt64(padding_config.dimensions(i).edge_padding_low()));
output_multi_index.push_back(index);
}
llvm_ir::IrArray::Index output_index(
output_multi_index, output_array.GetShape(), operand_index.GetType());
output_array.EmitWriteArrayElement(output_index, operand_data, b());
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
return absl::OkStatus();
}
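// Three fusion kinds are handled: in-place fused dynamic-update-slice, loop
// fusion via the elemental emitter, and output fusion of a dot with an
// addend.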
absl::Status IrEmitter::HandleFusion(HloInstruction* fusion) {
auto* root = fusion->fused_expression_root();
if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(fusion, assignment_)) {
VLOG(3) << "HandleFusion FusedDynamicUpdateSliceInPlace";
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
FusedIrEmitter fused_emitter(elemental_emitter);
BindFusionArguments(fusion, &fused_emitter);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
return llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
fusion, GetIrArrayFor(fusion), &fused_emitter, b());
} else if (fusion->IsLoopFusion()) {
VLOG(3) << "HandleFusion kLoop";
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
FusedIrEmitter fused_emitter(elemental_emitter);
BindFusionArguments(fusion, &fused_emitter);
TF_ASSIGN_OR_RETURN(auto generator, fused_emitter.GetGenerator(
*fusion->fused_expression_root()));
return EmitTargetElementLoop(fusion, "kLoop_fusion", generator,
std::nullopt);
} else if (fusion->IsOutputFusion()) {
VLOG(3) << "HandleFusion kOutput";
int64_t dot_op_index =
root->operand(0)->opcode() == HloOpcode::kDot ? 0 : 1;
const HloInstruction* dot = root->operand(dot_op_index);
CHECK_EQ(dot->opcode(), HloOpcode::kDot)
<< dot->ToString() << " "
<< fusion->fused_instructions_computation()->ToString();
int64_t dot_lhs_param_number = dot->operand(0)->parameter_number();
int64_t dot_rhs_param_number = dot->operand(1)->parameter_number();
int64_t addend_param_number =
root->operand(1 - dot_op_index)->parameter_number();
Shape target_shape = fusion->shape();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
llvm_ir::IrArray target_array = GetIrArrayFor(fusion);
llvm_ir::IrArray lhs_array(
GetIrArrayFor(fusion->operand(dot_lhs_param_number)));
llvm_ir::IrArray rhs_array(
GetIrArrayFor(fusion->operand(dot_rhs_param_number)));
llvm_ir::IrArray addend_array(
GetIrArrayFor(fusion->operand(addend_param_number)));
TF_RETURN_IF_ERROR(
EmitDotOperation(*dot, target_array, lhs_array, rhs_array,
&addend_array, GetExecutableRunOptionsArgument(), b(),
hlo_module_config_, target_machine_features_));
return absl::OkStatus();
} else {
return Unimplemented("Fusion kind not implemented on CPU");
}
}
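// Calls either go through the parallel fork-join runtime (when the callee
// root carries outer-dimension partitions) or are emitted as a direct global
// call.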
absl::Status IrEmitter::HandleCall(HloInstruction* call) {
HloComputation* computation = call->to_apply();
llvm::Function* call_ir_function = FindOrDie(
emitted_functions_, ComputationToEmit{computation, allow_reassociation_});
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(call));
auto backend_config_or =
computation->root_instruction()->backend_config<BackendConfig>();
if (backend_config_or.ok() &&
!backend_config_or->outer_dimension_partitions().empty()) {
    std::vector<llvm::Value*> call_args = GetArrayFunctionCallArguments(
        /*parameter_addresses=*/{}, b(), computation->name(),
        /*return_value_buffer=*/emitted_value_[call],
        /*exec_run_options_arg=*/GetExecutableRunOptionsArgument(),
        /*buffer_table_arg=*/GetBufferTableArgument(),
        /*status_arg=*/GetStatusArgument(),
        /*profile_counters_arg=*/GetProfileCountersArgument());
HloInstruction* root = computation->root_instruction();
TF_RETURN_IF_ERROR(EmitCallToParallelForkJoin(
call_args, root->shape(),
backend_config_or->outer_dimension_partitions(), b(), call_ir_function,
computation->name()));
if (ComputationTransitivelyContainsCustomCall(computation)) {
EmitEarlyReturnIfErrorStatus();
}
} else {
EmitGlobalCall(*computation, computation->name());
}
return absl::OkStatus();
}
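// Copies a dynamically shaped array into a static buffer: the dynamic
// dimension sizes are stored as int32 metadata after the raw data, and each
// element is written at its linearized dynamic position.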
absl::Status IrEmitter::EmitSliceToDynamic(
const HloInstruction* hlo, absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array) {
std::vector<llvm::Value*> dynamic_dims;
int32_t raw_data_size =
ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(hlo->shape()));
llvm::Value* dest_buffer = target_array.GetBasePointer();
for (int64_t i = 1; i < hlo->operand_count(); ++i) {
const int64_t dim_index = i - 1;
llvm::Value* source_buffer = source_arrays[i].GetBasePointer();
llvm::LoadInst* dyn_dim_size = Load(IrShapeType(hlo->operand(i)->shape()),
source_buffer, "dyn_dim_size");
llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
b()->getInt8Ty(), dest_buffer,
raw_data_size + dim_index * sizeof(int32_t));
b()->CreateStore(dyn_dim_size, metadata);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
}
auto loop_body_emitter =
[&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
llvm::Value* source_element =
source_arrays[0].EmitReadArrayElement(array_index, b());
llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
llvm_ir::IrArray::Index dest_index(linear_index, target_array.GetShape(),
b());
target_array.EmitWriteArrayElement(dest_index, source_element, b());
return absl::OkStatus();
};
return llvm_ir::LoopEmitter(loop_body_emitter, target_array.GetShape(),
dynamic_dims, b())
.EmitLoop(IrName(hlo));
}
absl::Status IrEmitter::HandleSliceToDynamic(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
llvm_ir::IrArray target_array = GetIrArrayFor(hlo);
std::vector<llvm_ir::IrArray> source_arrays;
source_arrays.reserve(hlo->operand_count());
for (auto operand : hlo->operands()) {
source_arrays.push_back(GetIrArrayFor(operand));
}
return EmitSliceToDynamic(hlo, source_arrays, target_array);
}
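// Inverse of SliceToDynamic: reads the dynamic dimension sizes from the
// operand's trailing metadata, copies the valid elements into a static
// buffer, and emits a tuple of (data, dim0 size, dim1 size, ...).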
absl::Status IrEmitter::HandlePadToStatic(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
assignment_.GetUniqueSlice(hlo, {0}));
std::vector<llvm::Value*> dynamic_dims;
std::vector<llvm::Value*> tuple_operand_ptrs;
const Shape& data_shape = ShapeUtil::GetSubshape(hlo->shape(), {0});
const Shape& input_shape = hlo->operand(0)->shape();
llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);
llvm::Type* data_type = IrShapeType(data_shape);
llvm_ir::IrArray data_array(data_address, data_type, data_shape);
llvm::Value* source_buffer = GetEmittedValueFor(hlo->operand(0));
int64_t raw_data_size =
ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(input_shape));
tuple_operand_ptrs.push_back(data_array.GetBasePointer());
for (int i = 1; i < hlo->shape().tuple_shapes_size(); ++i) {
const Shape& dim_shape = ShapeUtil::GetSubshape(hlo->shape(), {i});
TF_RET_CHECK(Shape::Equal()(dim_shape, ShapeUtil::MakeScalarShape(S32)));
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice dim_size_slice,
assignment_.GetUniqueSlice(hlo, {i}));
llvm::Value* dest_dim_size_address =
EmitBufferPointer(dim_size_slice, data_shape);
const int64_t dim_index = i - 1;
llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
b()->getInt8Ty(), source_buffer,
raw_data_size + dim_index * sizeof(int32_t));
llvm::Value* dyn_dim_size =
b()->CreateLoad(b()->getInt32Ty(), metadata, "dyn_dim_size");
b()->CreateStore(dyn_dim_size, dest_dim_size_address);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
tuple_operand_ptrs.push_back(dest_dim_size_address);
}
auto loop_body_emitter =
[&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
llvm_ir::IrArray::Index source_index(linear_index, input_shape, b());
llvm::Value* source_element =
GetIrArrayFor(hlo->operand(0)).EmitReadArrayElement(source_index, b());
data_array.EmitWriteArrayElement(array_index, source_element, b());
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(loop_body_emitter, input_shape, dynamic_dims, b())
.EmitLoop(IrName(hlo)));
llvm_ir::EmitTuple(GetIrArrayFor(hlo), tuple_operand_ptrs, b());
return absl::OkStatus();
}
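// Lowers a "TopK" custom call to the XLA:CPU runtime routine named by
// kTopKF32SymbolName. Only F32 inputs in row-major (dim0-major) layout with an
// optional leading batch dimension are supported, as enforced by the checks
// below.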
absl::Status IrEmitter::HandleTopK(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
const HloInstruction* input = hlo->operand(0);
const int64_t k = hlo->shape().tuple_shapes(0).dimensions().back();
const bool has_batch = hlo->shape().tuple_shapes(0).dimensions_size() == 2;
TF_RET_CHECK(input->shape().element_type() == F32) << hlo->ToString();
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
hlo->shape().tuple_shapes(0).layout()))
<< hlo->ToString();
TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
hlo->shape().tuple_shapes(1).layout()))
<< hlo->ToString();
TF_RET_CHECK(
LayoutUtil::IsMonotonicWithDim0Major(hlo->operand(0)->shape().layout()))
<< hlo->ToString();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice values_slice,
assignment_.GetUniqueSlice(hlo->operand(0), {}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_values_slice,
assignment_.GetUniqueSlice(hlo, {0}));
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_indices_slice,
assignment_.GetUniqueSlice(hlo, {1}));
llvm::Value* values_ptr =
EmitBufferPointer(values_slice, hlo->operand(0)->shape());
llvm::Value* out_values_ptr =
EmitBufferPointer(out_values_slice, hlo->shape().tuple_shapes(0));
llvm::Value* out_indices_ptr =
EmitBufferPointer(out_indices_slice, hlo->shape().tuple_shapes(1));
EmitCallToFunc(
runtime::kTopKF32SymbolName,
{b()->getInt64(has_batch ? input->shape().dimensions(0) : 1),
b()->getInt64(input->shape().dimensions().back()), b()->getInt64(k),
values_ptr, out_values_ptr, out_indices_ptr},
b()->getVoidTy());
llvm_ir::EmitTuple(GetIrArrayFor(hlo), {out_values_ptr, out_indices_ptr},
b());
return absl::OkStatus();
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
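// Emits a stack alloca carrying memref metadata for each operand of a oneDNN
// custom call and appends the alloca to the packed pointer array `args_val`,
// advancing `arg_indx` past the inserted entries.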
std::vector<StackAlloca> IrEmitter::EmitOneDnnOperandsAlloca(
HloInstruction* custom_call, llvm::Value*& args_val, int& arg_indx) {
std::vector<StackAlloca> operands_stack_alloca;
const int num_operands = custom_call->operand_count();
operands_stack_alloca.reserve(num_operands);
for (int i = 0; i < num_operands; ++i) {
llvm_ir::IrArray ir_array(GetIrArrayFor(custom_call->operand(i)));
StackAlloca stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), ir_array);
args_val = b()->CreateInsertValue(args_val, stack_alloca.value, arg_indx++);
operands_stack_alloca.push_back(std::move(stack_alloca));
}
return operands_stack_alloca;
}
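// Lowers a oneDNN matmul (or matmul-reorder) custom call. The runtime entry
// point receives a packed pointer array of [nargs, executable run options,
// serialized OneDnnMatMulConfig, operand memrefs...] plus the result memref
// and, when the output is a (result, scratchpad) tuple, a scratchpad memref.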
absl::Status IrEmitter::HandleOneDnnMatMulCalls(
HloInstruction* custom_call, std::string runtime_symbol_name) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnMatMulConfig matmul_config;
matmul_config.CopyFrom(backend_config->onednn_matmul_config());
std::string str_config;
matmul_config.SerializeToString(&str_config);
llvm::Value* matmul_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, matmul_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't equal the last argument index.";
llvm::Value* args_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "matmul.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
StackAlloca result_stack_alloca;
StackAlloca scratch_stack_alloca;
std::vector<llvm::Value*> fn_call_args;
fn_call_args.reserve(3);
const bool use_scratchpad = custom_call->shape().IsTuple();
if (use_scratchpad) {
llvm::Value* result_slice_ptr;
llvm::Value* scratch_slice_ptr;
llvm_ir::IrArray result_array;
llvm_ir::IrArray scratch_array;
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice result_slice,
assignment_.GetUniqueSlice(custom_call, {0}));
const Shape& result_shape = custom_call->shape().tuple_shapes(0);
result_slice_ptr = EmitBufferPointer(result_slice, result_shape);
llvm::Type* ir_type = IrShapeType(result_shape);
result_array = llvm_ir::IrArray(result_slice_ptr, ir_type, result_shape);
result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
fn_call_args.push_back(result_stack_alloca.value);
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice scratch_slice,
assignment_.GetUniqueSlice(custom_call, {1}));
const Shape& scratch_shape = custom_call->shape().tuple_shapes(1);
scratch_slice_ptr = EmitBufferPointer(scratch_slice, scratch_shape);
llvm::Type* scratch_type = IrShapeType(scratch_shape);
scratch_array =
llvm_ir::IrArray(scratch_slice_ptr, scratch_type, scratch_shape);
scratch_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), scratch_array);
fn_call_args.push_back(scratch_stack_alloca.value);
llvm_ir::EmitTuple(GetIrArrayFor(custom_call),
{result_slice_ptr, scratch_slice_ptr}, b());
} else {
llvm_ir::IrArray result_array;
result_array = GetIrArrayFor(custom_call);
result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
fn_call_args.push_back(result_stack_alloca.value);
fn_call_args.push_back(llvm::ConstantPointerNull::get(b()->getPtrTy()));
}
fn_call_args.push_back(args_ptr);
EmitCallToFunc(std::move(runtime_symbol_name), fn_call_args,
b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
for (auto& alloca : operands_stack_alloca) {
alloca.EmitLifetimeEnd();
}
result_stack_alloca.EmitLifetimeEnd();
if (use_scratchpad) {
scratch_stack_alloca.EmitLifetimeEnd();
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOneDnnConvolution(HloInstruction* custom_call) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnConvolutionConfig conv_config;
conv_config.CopyFrom(backend_config->onednn_conv_config());
std::string str_config;
conv_config.SerializeToString(&str_config);
llvm::Value* conv_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, conv_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't equal the last argument index.";
llvm::Value* args_ptr = llvm_ir::EmitAllocaAtFunctionEntry(
ptr_array_type, "convolution.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnConvolutionSymbolName,
{result_stack_alloca.value, args_ptr}, b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
for (int i = 0; i < num_operands; ++i) {
operands_stack_alloca[i].EmitLifetimeEnd();
}
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
absl::Status IrEmitter::HandleOneDnnLayerNorm(HloInstruction* custom_call) {
const int nargs_offset = 3;
const int num_operands = custom_call->operand_count();
const int nargs = nargs_offset + num_operands;
int arg_indx = 0;
llvm::Type* i64_type = b()->getInt64Ty();
llvm::Type* ptr_type = b()->getPtrTy();
llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);
llvm::Value* nargs_val = b()->getInt64(nargs);
llvm::Value* nargs_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
b()->CreateStore(nargs_val, nargs_ptr);
args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnNormConfig ln_config;
ln_config.CopyFrom(backend_config->onednn_layer_norm_config());
std::string str_config;
ln_config.SerializeToString(&str_config);
llvm::Value* ln_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
args_val = b()->CreateInsertValue(args_val, ln_config_val, arg_indx++);
auto operands_stack_alloca =
EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments doesn't equal the last argument index.";
llvm::Value* args_ptr =
llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "layernorm.args", b());
b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
b()->CreateStore(args_val, args_ptr);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnLayerNormSymbolName,
{result_stack_alloca.value, args_ptr}, b()->getVoidTy());
b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
for (int i = 0; i < num_operands; ++i) {
operands_stack_alloca[i].EmitLifetimeEnd();
}
b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
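// Lowers a oneDNN softmax custom call. Unlike the matmul, convolution, and
// layer-norm handlers above, the softmax runtime entry point takes its
// arguments directly instead of through a packed pointer array.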
absl::Status IrEmitter::HandleOneDnnSoftmax(HloInstruction* custom_call) {
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto backend_config = typed_custom_call->backend_config<BackendConfig>();
OneDnnSoftmaxConfig softmax_config;
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
std::string str_config;
softmax_config.SerializeToString(&str_config);
llvm::Value* softmax_config_val =
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
auto input = custom_call->operand(0);
llvm_ir::IrArray input_array(GetIrArrayFor(input));
auto input_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), input_array);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
EmitCallToFunc(runtime::kOneDnnSoftmaxSymbolName,
{GetExecutableRunOptionsArgument(), input_stack_alloca.value,
result_stack_alloca.value, softmax_config_val},
b()->getVoidTy());
input_stack_alloca.EmitLifetimeEnd();
result_stack_alloca.EmitLifetimeEnd();
return absl::OkStatus();
}
#endif  // defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
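// Dispatches custom calls: the dynamic-shape helpers (PadToStatic and
// SliceToDynamic), TopK, and (when oneDNN is enabled) the __onednn$* targets
// go to dedicated emitters; all other targets are lowered to a direct call
// following one of the custom-call ABI versions handled below.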
absl::Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {
if (custom_call->custom_call_target() == "PadToStatic") {
return HandlePadToStatic(custom_call);
}
if (custom_call->custom_call_target() == "SliceToDynamic") {
return HandleSliceToDynamic(custom_call);
}
if (custom_call->custom_call_target() == "TopK") {
return HandleTopK(custom_call);
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
if (custom_call->custom_call_target() == "__onednn$matmul") {
return HandleOneDnnMatMulCalls(custom_call,
runtime::kOneDnnMatMulSymbolName);
}
if (custom_call->custom_call_target() == "__onednn$softmax") {
return HandleOneDnnSoftmax(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$layernorm") {
return HandleOneDnnLayerNorm(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$convolution") {
return HandleOneDnnConvolution(custom_call);
}
if (custom_call->custom_call_target() == "__onednn$matmul_reorder") {
return HandleOneDnnMatMulCalls(custom_call,
runtime::kOneDnnMatMulReorderSymbolName);
}
#endif  // defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
absl::Span<HloInstruction* const> operands(custom_call->operands());
auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
auto is_typed_ffi = typed_custom_call->api_version() ==
CustomCallApiVersion::API_VERSION_TYPED_FFI;
std::vector<llvm::Value*> operand_values;
operand_values.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
HloInstruction* operand = operands[i];
if (is_typed_ffi) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
operand->shape(), [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(operand, index));
operand_values.push_back(EmitBufferPointer(slice, shape));
return absl::OkStatus();
}));
} else {
operand_values.push_back(GetEmittedValueFor(operand));
}
}
llvm::AllocaInst* operands_alloca =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(operand_values.size()),
"cc_operands_alloca", b());
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc("__msan_unpoison",
{operands_alloca,
llvm::ConstantInt::get(
intptr_type, *operands_alloca->getAllocationSize(dl))},
b()->getVoidTy());
}
for (int64_t i = 0; i < operand_values.size(); ++i) {
llvm::Value* slot_in_operands_alloca =
InBoundsGEP(operands_alloca->getAllocatedType(), operands_alloca,
{b()->getInt64(i)});
Store(operand_values[i], slot_in_operands_alloca);
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
std::vector<llvm::Value*> tuple_ptrs;
if (custom_call->shape().IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(custom_call->shape());
++i) {
const Shape& elem_shape =
ShapeUtil::GetTupleElementShape(custom_call->shape(), i);
if (!is_typed_ffi) {
TF_RET_CHECK(!elem_shape.IsTuple()) << "Nested tuples not implemented";
}
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(custom_call, {i}));
tuple_ptrs.push_back(EmitBufferPointer(slice, elem_shape));
}
llvm_ir::EmitTuple(GetIrArrayFor(custom_call), tuple_ptrs, b());
}
auto* output_address = GetEmittedValueFor(custom_call);
switch (typed_custom_call->api_version()) {
case CustomCallApiVersion::API_VERSION_ORIGINAL:
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca}, b()->getVoidTy());
break;
case CustomCallApiVersion::API_VERSION_STATUS_RETURNING:
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca, GetStatusArgument()},
b()->getVoidTy());
EmitEarlyReturnIfErrorStatus();
break;
case CustomCallApiVersion::API_VERSION_STATUS_RETURNING_UNIFIED: {
absl::string_view opaque = typed_custom_call->opaque();
EmitCallToFunc(custom_call->custom_call_target(),
{output_address, operands_alloca,
b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(opaque)),
b()->getInt64(opaque.size()), GetStatusArgument()},
b()->getVoidTy());
EmitEarlyReturnIfErrorStatus();
break;
}
case CustomCallApiVersion::API_VERSION_TYPED_FFI: {
std::vector<llvm::Value*> buffer_ptrs;
if (custom_call->shape().IsTuple()) {
buffer_ptrs.reserve(ShapeUtil::TupleElementCount(custom_call->shape()));
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
custom_call->shape(),
[&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(custom_call, index));
buffer_ptrs.push_back(EmitBufferPointer(slice, shape));
return absl::OkStatus();
}));
llvm::AllocaInst* results_alloca =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
b()->getPtrTy(), b()->getInt32(buffer_ptrs.size()),
"ffi_results_alloca", b());
if (emit_code_for_msan_) {
const llvm::DataLayout& dl = module_->getDataLayout();
llvm::Type* intptr_type = b()->getIntPtrTy(dl);
EmitCallToFunc(
"__msan_unpoison",
{results_alloca,
llvm::ConstantInt::get(intptr_type,
*results_alloca->getAllocationSize(dl))},
b()->getVoidTy());
}
for (int i = 0; i < buffer_ptrs.size(); ++i) {
llvm::Value* tuple_slot_in_results_alloca =
InBoundsGEP(results_alloca->getAllocatedType(), results_alloca,
{b()->getInt64(i)});
Store(buffer_ptrs[i], tuple_slot_in_results_alloca);
}
EmitCallToFfi(typed_custom_call, results_alloca, operands_alloca);
EmitEarlyReturnIfErrorStatus();
break;
}
default:
return Internal(
"Unknown custom-call API version enum value: %d (%s)",
typed_custom_call->api_version(),
CustomCallApiVersion_Name(typed_custom_call->api_version()));
}
return absl::OkStatus();
}
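// Emits a while loop. Buffer assignment must have arranged for the while
// instruction, its init operand, the condition and body parameters, and the
// body root to share buffer slices (verified below), so the loop can update
// its state in place:
//
//   header: <cond> = condition(state); br <cond>, body, exit
//   body:   state  = body(state);      br header
//   exit: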
absl::Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
HloComputation* condition = xla_while->while_condition();
TF_RET_CHECK(ShapeUtil::IsScalar(condition->root_instruction()->shape()) &&
condition->root_instruction()->shape().element_type() == PRED)
<< "While condition computation must return bool; got: "
<< ShapeUtil::HumanString(condition->root_instruction()->shape());
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
xla_while->shape(),
      [this, &xla_while](const Shape& /*subshape*/,
const ShapeIndex& index) -> absl::Status {
auto check = [this](const HloInstruction* a, const HloInstruction* b,
const ShapeIndex& index) -> absl::Status {
const BufferAllocation::Slice slice_a =
assignment_.GetUniqueSlice(a, index).value();
const BufferAllocation::Slice slice_b =
assignment_.GetUniqueSlice(b, index).value();
if (slice_a != slice_b) {
return Internal(
"instruction %s %s does not share slice with "
"instruction %s %s",
a->ToString(), slice_a.ToString(), b->ToString(),
slice_b.ToString());
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(check(xla_while, xla_while->operand(0), index));
TF_RETURN_IF_ERROR(check(
xla_while, xla_while->while_condition()->parameter_instruction(0),
index));
TF_RETURN_IF_ERROR(
check(xla_while, xla_while->while_body()->parameter_instruction(0),
index));
TF_RETURN_IF_ERROR(check(
xla_while, xla_while->while_body()->root_instruction(), index));
return absl::OkStatus();
}));
const HloInstruction* init = xla_while->operand(0);
emitted_value_[xla_while] = GetEmittedValueFor(init);
llvm::BasicBlock* header_bb = llvm::BasicBlock::Create(
module_->getContext(), IrName(xla_while, "header"),
compute_function()->function());
Br(header_bb);
b()->SetInsertPoint(header_bb);
EmitGlobalCall(*xla_while->while_condition(), IrName(xla_while, "cond"));
llvm::Value* while_predicate = ICmpNE(
Load(IrShapeType(
xla_while->while_condition()->root_instruction()->shape()),
GetBufferForGlobalCallReturnValue(*xla_while->while_condition())),
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0));
llvm::BasicBlock* body_bb =
llvm::BasicBlock::Create(module_->getContext(), IrName(xla_while, "body"),
compute_function()->function());
llvm::BasicBlock* exit_bb = llvm::BasicBlock::Create(
module_->getContext(), IrName(xla_while, "exit"));
CondBr(while_predicate, body_bb, exit_bb);
b()->SetInsertPoint(body_bb);
EmitGlobalCall(*xla_while->while_body(), IrName(xla_while, "body"));
Br(header_bb);
llvm::Function* llvm_fn = compute_function()->function();
llvm_fn->insert(llvm_fn->end(), exit_bb);
b()->SetInsertPoint(exit_bb);
return absl::OkStatus();
}
absl::Status IrEmitter::EmitFastConcatenate(
const HloInstruction* instr,
absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array) {
return ::xla::cpu::EmitFastConcatenate(instr, source_arrays, target_array,
module_, *b());
}
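// Emits a concatenate as a sequence of bulk copies. The loop nest iterates
// over the dimensions major to the concatenation dimension; at each such
// index every operand contributes a contiguous run of
// inner_dims_product * operand_dims(concat_dim) elements, copied at its
// running byte offset into the output.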
absl::Status EmitFastConcatenate(
const HloInstruction* instr,
absl::Span<const llvm_ir::IrArray> source_arrays,
const llvm_ir::IrArray& target_array, llvm::Module* module,
llvm::IRBuilder<>& b) {
auto* concatenate = Cast<HloConcatenateInstruction>(instr);
const Shape& output_shape = concatenate->shape();
int64_t concat_dim = concatenate->concatenate_dimension();
const Layout& output_layout = output_shape.layout();
auto output_min2maj = LayoutUtil::MinorToMajor(output_layout);
auto concat_dim_layout_itr = absl::c_find(output_min2maj, concat_dim);
std::vector<int64_t> inner_dims(output_min2maj.begin(),
concat_dim_layout_itr);
std::vector<int64_t> outer_dims(std::next(concat_dim_layout_itr),
output_min2maj.end());
llvm_ir::ForLoopNest loops(IrName(concatenate), &b);
std::vector<llvm::Value*> target_multi_index =
loops.AddLoopsForShapeOnDimensions(output_shape, outer_dims, "concat");
absl::c_replace(target_multi_index, static_cast<llvm::Value*>(nullptr),
static_cast<llvm::Value*>(b.getInt64(0)));
llvm_ir::IrArray::Index target_index(target_multi_index, output_shape,
b.getInt64Ty());
if (!outer_dims.empty()) {
SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b);
}
PrimitiveType primitive_type = output_shape.element_type();
unsigned primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
llvm::Value* target_region_begin =
target_array.EmitArrayElementAddress(target_index, &b, "target_region");
int64_t byte_offset_into_target_region = 0;
int64_t inner_dims_product = absl::c_accumulate(
inner_dims, int64_t{1}, [&](int64_t product, int64_t inner_dim) {
return product * output_shape.dimensions(inner_dim);
});
for (int64_t i = 0; i < source_arrays.size(); ++i) {
const Shape& input_shape = concatenate->operand(i)->shape();
const llvm_ir::IrArray& source_array = source_arrays[i];
llvm_ir::IrArray::Index source_index(target_multi_index, input_shape,
b.getInt64Ty());
llvm::Value* copy_source_address =
source_array.EmitArrayElementAddress(source_index, &b, "src_addr");
llvm::Value* copy_target_address =
b.CreateGEP(b.getInt8Ty(), target_region_begin,
b.getInt64(byte_offset_into_target_region));
::xla::cpu::EmitTransferElements(
copy_target_address, copy_source_address,
inner_dims_product * input_shape.dimensions(concat_dim), primitive_type,
target_array, source_array, module, b);
byte_offset_into_target_region += inner_dims_product *
input_shape.dimensions(concat_dim) *
primitive_type_size;
}
if (!outer_dims.empty()) {
SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b);
}
return absl::OkStatus();
}
llvm::Value* IrEmitter::EmitPrintf(absl::string_view fmt,
absl::Span<llvm::Value* const> arguments) {
std::vector<llvm::Value*> call_args;
call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
absl::c_copy(arguments, std::back_inserter(call_args));
return b()->CreateCall(
b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
"printf",
llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
call_args);
}
llvm::Value* IrEmitter::EmitPrintfToStderr(
absl::string_view fmt, absl::Span<llvm::Value* const> arguments) {
std::vector<llvm::Value*> call_args;
call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
absl::c_copy(arguments, std::back_inserter(call_args));
return b()->CreateCall(
b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
runtime::kPrintfToStderrSymbolName,
llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
call_args);
}
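// Declares (or reuses) a C-calling-convention function whose signature is
// derived from the argument types, applies the requested attributes, and
// emits a call to it.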
llvm::Value* IrEmitter::EmitCallToFunc(
std::string func_name, const std::vector<llvm::Value*>& arguments,
llvm::Type* return_type, bool does_not_throw, bool only_accesses_arg_memory,
bool only_accesses_inaccessible_mem_or_arg_mem) {
std::vector<llvm::Type*> types;
types.reserve(arguments.size());
absl::c_transform(arguments, std::back_inserter(types),
[&](llvm::Value* val) { return val->getType(); });
llvm::FunctionType* func_type =
      llvm::FunctionType::get(return_type, types, /*isVarArg=*/false);
auto func = llvm::dyn_cast<llvm::Function>(
module_->getOrInsertFunction(func_name, func_type).getCallee());
func->setCallingConv(llvm::CallingConv::C);
if (does_not_throw) {
func->setDoesNotThrow();
}
if (only_accesses_arg_memory) {
func->setOnlyAccessesArgMemory();
}
if (only_accesses_inaccessible_mem_or_arg_mem) {
func->setOnlyAccessesInaccessibleMemOrArgMem();
}
return b()->CreateCall(func, arguments);
}
template <typename T>
static const Shape& GetShape(T&& arg) {
if constexpr (std::is_convertible_v<absl::remove_cvref_t<decltype(arg)>,
Shape>) {
return arg;
} else {
return arg->shape();
}
};
struct EncodedInfo {
llvm::AllocaInst* alloca;
int64_t size;
};
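// Encodes the element type of every leaf array in `args` into an i32 stack
// array: one PrimitiveType enum value per leaf, in ForEachSubshape order.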
template <typename Args>
static EncodedInfo StoreEncodedTypes(std::string_view alloca_name,
const Args& args, llvm::IRBuilder<>& ir) {
int64_t total_elements = 0;
for (int64_t i = 0; i < args.size(); ++i) {
total_elements += ShapeUtil::GetLeafCount(GetShape(args[i]));
}
llvm::AllocaInst* types_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
ir.getInt32Ty(), ir.getInt64(total_elements), alloca_name, &ir);
int64_t element_id = 0;
auto store_type = [&](const Shape& shape, const ShapeIndex& index) {
if (shape.IsTuple()) {
return;
}
llvm::Value* slot_in_types_alloca = ir.CreateConstInBoundsGEP1_32(
ir.getInt32Ty(), types_alloca, element_id++);
ir.CreateStore(ir.getInt32(shape.element_type()), slot_in_types_alloca);
};
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(GetShape(args[i]), store_type);
}
CHECK_EQ(element_id, total_elements);
return {types_alloca, total_elements};
};
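// Encodes the dimensions of every array subshape in `args` into an i64 stack
// array. Each array contributes [rank, dim0, dim1, ...].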
template <typename Args>
static EncodedInfo StoreEncodedShapes(std::string_view alloca_name,
const Args& args, llvm::IRBuilder<>& ir) {
int64_t total_dims = 0;
int64_t total_dim_counts = 0;
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(
GetShape(args[i]), [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return;
}
total_dims += shape.dimensions().size();
++total_dim_counts;
});
}
  // Each array contributes one slot for its rank plus one slot per dimension.
  int64_t shapes_encoding_size = total_dim_counts + total_dims;
llvm::AllocaInst* shapes_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
ir.getInt64Ty(), ir.getInt64(shapes_encoding_size), alloca_name, &ir);
int64_t slot_id = 0;
auto store_shape = [&](const Shape& shape, const ShapeIndex& index) {
if (!shape.IsArray()) {
return;
}
llvm::Value* alloca_slot = ir.CreateConstInBoundsGEP1_64(
ir.getInt64Ty(), shapes_alloca, slot_id++);
ir.CreateStore(ir.getInt64(shape.dimensions().size()), alloca_slot);
for (int64_t dim : shape.dimensions()) {
alloca_slot = ir.CreateConstInBoundsGEP1_64(ir.getInt64Ty(),
shapes_alloca, slot_id++);
ir.CreateStore(ir.getInt64(dim), alloca_slot);
}
};
for (int64_t i = 0; i < args.size(); ++i) {
ShapeUtil::ForEachSubshape(GetShape(args[i]), store_shape);
}
CHECK_EQ(slot_id, shapes_encoding_size);
return {shapes_alloca, shapes_encoding_size};
};
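// Emits a call to the XLA:CPU FFI bridge (kHandleFfiCallSymbolName) for a
// typed-FFI custom call. Operand and result buffers arrive through the given
// allocas; their element types and shapes travel through the side-channel
// encodings built by StoreEncodedTypes/StoreEncodedShapes above.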
llvm::Value* IrEmitter::EmitCallToFfi(HloCustomCallInstruction* custom_call,
llvm::AllocaInst* results_alloca,
llvm::AllocaInst* operands_alloca) {
const auto& operands = absl::MakeSpan(custom_call->operands());
const auto& shape = custom_call->shape();
const auto& result_shapes =
shape.IsTuple() ? shape.tuple_shapes() : std::vector<Shape>({shape});
EncodedInfo operand_types_encoded =
StoreEncodedTypes("operands_types", operands, *b());
EncodedInfo operand_shapes_encoded =
StoreEncodedShapes("operands_shapes", operands, *b());
EncodedInfo result_types_encoded =
StoreEncodedTypes("results_types", result_shapes, *b());
EncodedInfo result_shapes_encoded =
StoreEncodedShapes("results_shapes", result_shapes, *b());
const absl::string_view target = custom_call->custom_call_target();
const absl::string_view opaque = custom_call->opaque();
const auto target_ref = llvm_ir::AsStringRef(target);
const auto opaque_ref = llvm_ir::AsStringRef(opaque);
std::vector<llvm::Value*> arguments = {
GetExecutableRunOptionsArgument(),
b()->CreateGlobalStringPtr(target_ref),
b()->getInt64(target.size()),
results_alloca,
operands_alloca,
b()->CreateGlobalStringPtr(opaque_ref),
b()->getInt64(opaque.size()),
GetStatusArgument(),
operand_types_encoded.alloca,
b()->getInt64(operand_types_encoded.size),
operand_shapes_encoded.alloca,
result_types_encoded.alloca,
b()->getInt64(result_types_encoded.size),
result_shapes_encoded.alloca,
};
return EmitCallToFunc(runtime::kHandleFfiCallSymbolName, arguments,
b()->getVoidTy(),
                        /*does_not_throw=*/false,
                        /*only_accesses_arg_memory=*/true);
}
void IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,
int64_t element_count,
PrimitiveType primitive_type,
const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& source_array) {
::xla::cpu::EmitTransferElements(target, source, element_count,
primitive_type, target_array, source_array,
module_, *b());
}
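// Copies `element_count` contiguous elements from `source` to `target`. A
// single element becomes an aligned load/store pair carrying the arrays'
// aliasing metadata; larger counts become a memcpy annotated with the merged
// metadata of both arrays.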
void EmitTransferElements(llvm::Value* target, llvm::Value* source,
int64_t element_count, PrimitiveType primitive_type,
const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& source_array,
llvm::Module* module, llvm::IRBuilder<>& b) {
unsigned primitive_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
primitive_type_size,
::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type)));
llvm::Type* primitive_llvm_type =
llvm_ir::PrimitiveTypeToIrType(primitive_type, module);
if (element_count == 1) {
auto* load_instruction =
b.CreateAlignedLoad(primitive_llvm_type, source, element_alignment);
source_array.AnnotateLoadStoreInstructionWithMetadata(load_instruction);
auto* store_instruction =
b.CreateAlignedStore(load_instruction, target, element_alignment);
target_array.AnnotateLoadStoreInstructionWithMetadata(store_instruction);
} else {
auto* memcpy_instruction = b.CreateMemCpy(
target, llvm::Align(element_alignment), source,
llvm::Align(element_alignment),
element_count * primitive_type_size);
std::map<int, llvm::MDNode*> merged_metadata =
llvm_ir::MergeMetadata(&module->getContext(), source_array.metadata(),
target_array.metadata());
for (const auto& kind_md_pair : merged_metadata) {
memcpy_instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);
}
}
}
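// Returns OK if a concatenate can be emitted as bulk copies: this requires a
// sequential (non-parallel) loop and operand layouts identical to the output
// layout.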
absl::Status IrEmitter::CanDoFastConcatenate(
const HloInstruction* instr) const {
if (ShouldEmitParallelLoopFor(*instr)) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"Cannot generate memcpy-based concat for the parallel CPU backend");
}
const auto* concatenate = Cast<HloConcatenateInstruction>(instr);
const Shape& output_shape = concatenate->shape();
for (auto* op : concatenate->operands()) {
if (!LayoutUtil::Equal(op->shape().layout(), output_shape.layout())) {
return absl::Status(absl::StatusCode::kFailedPrecondition,
"Operand has mismatching layouts");
}
}
return absl::OkStatus();
}
absl::Status IrEmitter::HandleConcatenate(HloInstruction* concatenate) {
absl::Status fast_impl_reason = CanDoFastConcatenate(concatenate);
if (fast_impl_reason.ok()) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(concatenate));
llvm_ir::IrArray target_array = GetIrArrayFor(concatenate);
std::vector<llvm_ir::IrArray> source_arrays;
source_arrays.reserve(concatenate->operands().size());
for (HloInstruction* operand : concatenate->operands()) {
source_arrays.emplace_back(GetIrArrayFor(operand));
}
TF_RETURN_IF_ERROR(::xla::cpu::EmitFastConcatenate(
concatenate, source_arrays, target_array, module_, *b()));
VLOG(1) << "Emitted fast concatenate for " << concatenate->ToString();
return absl::OkStatus();
}
VLOG(1) << "Could not emit fast concatenate for " << concatenate->ToString()
<< ": " << fast_impl_reason.message();
return DefaultAction(concatenate);
}
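// Emits a conditional. A PRED branch index becomes an if/else over the two
// branch computations; an S32 index becomes a switch whose default case runs
// the last branch computation, matching XLA's semantics of clamping an
// out-of-range branch index to the last branch.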
absl::Status IrEmitter::HandleConditional(HloInstruction* conditional) {
auto branch_index = conditional->operand(0);
int num_branches = conditional->branch_count();
TF_RET_CHECK(ShapeUtil::IsScalar(branch_index->shape()) &&
(branch_index->shape().element_type() == PRED ||
branch_index->shape().element_type() == S32))
<< "Branch index on a conditional must be scalar bool or int32_t; got: "
<< ShapeUtil::HumanString(branch_index->shape());
for (int b = 0; b < num_branches; ++b) {
HloComputation* br_computation = conditional->branch_computation(b);
TF_RET_CHECK(ShapeUtil::Equal(conditional->shape(),
br_computation->root_instruction()->shape()))
<< "Shape of conditional should be same as the shape of the " << b
<< "th branch computation; got: "
<< ShapeUtil::HumanString(conditional->shape()) << " and "
<< ShapeUtil::HumanString(br_computation->root_instruction()->shape());
}
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(conditional));
if (branch_index->shape().element_type() == PRED) {
llvm::LoadInst* pred_value = Load(
GetIrArrayFor(branch_index).GetBasePointeeType(),
GetIrArrayFor(branch_index).GetBasePointer(), "load_predicate_value");
llvm::Value* pred_cond =
ICmpNE(pred_value,
llvm::ConstantInt::get(
llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_data =
llvm_ir::EmitIfThenElse(pred_cond, "conditional", b());
SetToFirstInsertPoint(if_data.true_block, b());
EmitGlobalCall(*conditional->branch_computation(0),
IrName(conditional, "_true"));
SetToFirstInsertPoint(if_data.false_block, b());
EmitGlobalCall(*conditional->branch_computation(1),
IrName(conditional, "_false"));
SetToFirstInsertPoint(if_data.after_block, b());
return absl::OkStatus();
}
llvm::LoadInst* branch_index_value = Load(
GetIrArrayFor(branch_index).GetBasePointeeType(),
GetIrArrayFor(branch_index).GetBasePointer(), "load_branch_index_value");
auto case_block = b()->GetInsertBlock();
llvm::BasicBlock* after_block;
if (case_block->getTerminator() == nullptr) {
after_block = llvm_ir::CreateBasicBlock(nullptr, "case-after", b());
b()->SetInsertPoint(case_block);
b()->CreateBr(after_block);
} else {
after_block =
case_block->splitBasicBlock(b()->GetInsertPoint(), "case-after");
}
case_block->getTerminator()->eraseFromParent();
auto default_block = llvm_ir::CreateBasicBlock(nullptr, "case-default", b());
b()->SetInsertPoint(default_block);
EmitGlobalCall(*conditional->branch_computation(num_branches - 1),
IrName(conditional, "_default"));
b()->CreateBr(after_block);
b()->SetInsertPoint(case_block);
llvm::SwitchInst* case_inst =
b()->CreateSwitch(branch_index_value, default_block, num_branches - 1);
for (int br = 0; br < num_branches - 1; ++br) {
auto branch_block = llvm_ir::CreateBasicBlock(
nullptr, absl::StrCat("case-branch", br), b());
b()->SetInsertPoint(branch_block);
EmitGlobalCall(*conditional->branch_computation(br),
IrName(conditional, absl::StrCat("_branch", br)));
b()->CreateBr(after_block);
case_inst->addCase(b()->getInt32(br), branch_block);
}
SetToFirstInsertPoint(after_block, b());
return absl::OkStatus();
}
absl::Status IrEmitter::HandleAfterAll(HloInstruction* after_all) {
TF_RET_CHECK(ByteSizeOf(after_all->shape()) == 0);
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(after_all));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleBatchNormGrad(HloInstruction* batch_norm_grad) {
return Unimplemented("BatchNormGrad should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleBatchNormTraining(
HloInstruction* batch_norm_training) {
return Unimplemented("BatchNormTraining should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleGetDimensionSize(HloInstruction* get_size) {
return Unimplemented("GetDimensionSize should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleSetDimensionSize(HloInstruction* set_size) {
return Unimplemented("SetDimensionSize should be rewritten for CPU.");
}
absl::Status IrEmitter::HandleAddDependency(HloInstruction* add_dependency) {
emitted_value_[add_dependency] =
GetEmittedValueFor(add_dependency->operand(0));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleRng(HloInstruction* rng) {
return Unimplemented("Rng should be expanded for CPU.");
}
absl::Status IrEmitter::HandleRngBitGenerator(HloInstruction* rng) {
return Unimplemented("RngBitGenerator should be expanded for CPU.");
}
absl::Status IrEmitter::HandleRngGetAndUpdateState(HloInstruction* rng_state) {
VLOG(2) << "RngGetAndUpdateState: " << rng_state->ToString();
llvm::Value* old_state = llvm_ir::RngGetAndUpdateState(
Cast<HloRngGetAndUpdateStateInstruction>(rng_state)->delta(), module_,
b());
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rng_state));
llvm::Value* address = GetEmittedValueFor(rng_state);
llvm::StoreInst* store = Store(old_state, address);
store->setAlignment(llvm::Align(IrEmitter::MinimumAlignmentForPrimitiveType(
rng_state->shape().element_type())));
return absl::OkStatus();
}
absl::Status IrEmitter::HandleStochasticConvert(HloInstruction* instruction) {
return Unimplemented("StochasticConvert should be decomposed for CPU.");
}
absl::Status IrEmitter::FinishVisit(HloInstruction* root) {
VLOG(2) << "FinishVisit root: " << root->ToString();
if (root->opcode() == HloOpcode::kOutfeed) {
VLOG(2) << " outfeed with value: "
<< llvm_ir::DumpToString(GetEmittedValueFor(root->operand(0)));
} else {
VLOG(2) << " value: " << llvm_ir::DumpToString(GetEmittedValueFor(root));
}
auto record_complete_computation = [&](llvm::Value* prof_counter) {
if (prof_counter) {
profiling_state_.RecordCompleteComputation(b(), prof_counter);
}
};
record_complete_computation(GetProfileCounterFor(*root->parent()));
return absl::OkStatus();
}
template <typename T>
llvm::Value* IrEmitter::GetProfileCounterCommon(
const T& hlo,
const absl::flat_hash_map<const T*, int64_t>& profile_index_map) {
auto it = profile_index_map.find(&hlo);
if (it == profile_index_map.end()) {
return nullptr;
}
int64_t prof_counter_idx = it->second;
std::string counter_name = IrName("prof_counter", hlo.name());
return GEP(b()->getInt64Ty(), GetProfileCountersArgument(),
b()->getInt64(prof_counter_idx), counter_name);
}
llvm::Value* IrEmitter::GetProfileCounterFor(
const HloInstruction& instruction) {
return GetProfileCounterCommon<HloInstruction>(instruction,
instruction_to_profile_idx_);
}
llvm::Value* IrEmitter::GetProfileCounterFor(
const HloComputation& computation) {
return GetProfileCounterCommon<HloComputation>(computation,
computation_to_profile_idx_);
}
void IrEmitter::ProfilingState::UpdateProfileCounter(llvm::IRBuilder<>* b,
llvm::Value* prof_counter,
llvm::Value* cycle_end,
llvm::Value* cycle_start) {
auto* cycle_diff = b->CreateSub(cycle_end, cycle_start);
llvm::LoadInst* old_cycle_count = b->CreateLoad(
llvm::cast<llvm::GetElementPtrInst>(prof_counter)->getSourceElementType(),
prof_counter, "old_cycle_count");
auto* new_cycle_count =
b->CreateAdd(cycle_diff, old_cycle_count, "new_cycle_count");
b->CreateStore(new_cycle_count, prof_counter);
}
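// Reads the CPU cycle counter: x86 rdtscp (extracting the TSC from the
// {tsc, aux} aggregate) when enabled, the generic llvm.readcyclecounter
// intrinsic otherwise.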
llvm::Value* IrEmitter::ProfilingState::ReadCycleCounter(llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
if (!use_rdtscp_) {
llvm::Function* func_llvm_readcyclecounter =
llvm::Intrinsic::getDeclaration(module,
llvm::Intrinsic::readcyclecounter);
return b->CreateCall(func_llvm_readcyclecounter);
}
llvm::Function* func_llvm_x86_rdtscp =
llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::x86_rdtscp);
llvm::Value* rdtscp_call = b->CreateCall(func_llvm_x86_rdtscp);
return b->CreateExtractValue(rdtscp_call, {0});
}
void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* b,
HloInstruction* hlo) {
auto* cycle_start = ReadCycleCounter(b);
cycle_start->setName(IrName(hlo, "cycle_start"));
cycle_starts_[hlo] = cycle_start;
if (first_read_cycle_start_ == nullptr) {
first_read_cycle_start_ = cycle_start;
}
}
void IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* prof_counter) {
auto* cycle_end = ReadCycleCounter(b);
cycle_end->setName(IrName(hlo, "cycle_end"));
auto* cycle_start = cycle_starts_[hlo];
UpdateProfileCounter(b, prof_counter, cycle_end, cycle_start);
last_read_cycle_end_ = cycle_end;
}
void IrEmitter::ProfilingState::RecordCompleteComputation(
llvm::IRBuilder<>* b, llvm::Value* prof_counter) {
if (last_read_cycle_end_ && first_read_cycle_start_) {
UpdateProfileCounter(b, prof_counter, last_read_cycle_end_,
first_read_cycle_start_);
}
}
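// Emits a call to the tracing-start runtime hook, which returns an activity
// id that is remembered per HLO so EmitTracingEnd can close the matching
// activity.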
void IrEmitter::TracingState::EmitTracingStart(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* run_options) {
if (!enabled_) {
return;
}
llvm::Type* void_ptr_type = b->getPtrTy();
llvm::FunctionType* fn_type = llvm::FunctionType::get(
b->getInt64Ty(),
{void_ptr_type, void_ptr_type, void_ptr_type, b->getInt64Ty()},
      /*isVarArg=*/false);
llvm::Function* function = b->GetInsertBlock()->getParent();
llvm::Module* module = function->getParent();
const char* fn_name = runtime::kTracingStartSymbolName;
llvm::FunctionCallee trace_func =
module->getOrInsertFunction(fn_name, fn_type);
if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
fn->setCallingConv(llvm::CallingConv::C);
fn->setDoesNotThrow();
fn->setOnlyAccessesArgMemory();
}
auto* hlo_name = b->CreateGlobalStringPtr(hlo->name());
auto* hlo_module = b->CreateGlobalStringPtr(hlo->GetModule()->name());
auto* program_id = b->getInt64(hlo->GetModule()->unique_id());
auto* activity_id = b->CreateCall(
trace_func, {run_options, hlo_name, hlo_module, program_id});
activity_id->setName(IrName(hlo, "activity_id"));
activity_ids_[hlo] = activity_id;
}
void IrEmitter::TracingState::EmitTracingEnd(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* run_options) {
if (!enabled_) {
return;
}
llvm::FunctionType* fn_type =
llvm::FunctionType::get(b->getVoidTy(), {b->getPtrTy(), b->getInt64Ty()},
                              /*isVarArg=*/false);
llvm::Function* function = b->GetInsertBlock()->getParent();
llvm::Module* module = function->getParent();
const char* fn_name = runtime::kTracingEndSymbolName;
llvm::FunctionCallee trace_func =
module->getOrInsertFunction(fn_name, fn_type);
if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
fn->setCallingConv(llvm::CallingConv::C);
fn->setDoesNotThrow();
fn->setOnlyAccessesArgMemory();
}
auto* activity_id = activity_ids_.at(hlo);
b->CreateCall(trace_func, {run_options, activity_id});
}
namespace {
bool IsHloVeryCheap(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kBitcast ||
hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kReplicaId;
}
}  // namespace
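// Pre/post hooks run around each emitted instruction: they start and stop
// cycle profiling and runtime tracing for profiled instructions, and for
// non-trivial entry-computation instructions when cpu_traceme is enabled.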
absl::Status IrEmitter::Preprocess(HloInstruction* hlo) {
VLOG(3) << "Visiting: " << hlo->ToString();
if (instruction_to_profile_idx_.count(hlo) ||
(hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&
hlo->parent()->IsEntryComputation())) {
tracing_state_.EmitTracingStart(b(), hlo,
GetExecutableRunOptionsArgument());
profiling_state_.RecordCycleStart(b(), hlo);
}
return absl::OkStatus();
}
absl::Status IrEmitter::Postprocess(HloInstruction* hlo) {
if (auto* prof_counter = GetProfileCounterFor(*hlo)) {
profiling_state_.RecordCycleDelta(b(), hlo, prof_counter);
}
if (instruction_to_profile_idx_.count(hlo) ||
(hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&
hlo->parent()->IsEntryComputation())) {
tracing_state_.EmitTracingEnd(b(), hlo, GetExecutableRunOptionsArgument());
}
return absl::OkStatus();
}
llvm_ir::IrArray IrEmitter::GetIrArrayFor(const HloInstruction* hlo) {
llvm::Value* value_for_op = GetEmittedValueFor(hlo);
llvm::Type* ir_type = IrShapeType(hlo->shape());
llvm_ir::IrArray array(value_for_op, ir_type, hlo->shape());
AddAliasingInformationToIrArray(*hlo, &array);
return array;
}
std::vector<llvm_ir::IrArray> IrEmitter::GetIrArraysForOperandsOf(
const HloInstruction* hlo) {
std::vector<llvm_ir::IrArray> arrays;
std::transform(
hlo->operands().begin(), hlo->operands().end(),
std::back_inserter(arrays),
[&](const HloInstruction* operand) { return GetIrArrayFor(operand); });
return arrays;
}
llvm::Value* IrEmitter::GetEmittedValueFor(const HloInstruction* hlo) {
auto it = emitted_value_.find(hlo);
if (it == emitted_value_.end()) {
LOG(FATAL) << "could not find emitted value for: " << hlo->ToString();
}
return it->second;
}
llvm::Type* IrEmitter::IrShapeType(const Shape& shape) {
return llvm_ir::ShapeToIrType(shape, module_);
}
llvm::Value* IrEmitter::GetProfileCountersArgument() {
return compute_function()->profile_counters_arg();
}
llvm::Value* IrEmitter::GetStatusArgument() {
return compute_function()->status_arg();
}
llvm::Value* IrEmitter::GetBufferTableArgument() {
return compute_function()->buffer_table_arg();
}
llvm::Value* IrEmitter::GetExecutableRunOptionsArgument() {
return compute_function()->exec_run_options_arg();
}
llvm::BasicBlock* IrEmitter::GetReturnBlock() {
return compute_function()->return_block();
}
void IrEmitter::EmitEarlyReturnIfErrorStatus() {
llvm::Value* succeeded =
EmitCallToFunc(runtime::kStatusIsSuccessSymbolName, {GetStatusArgument()},
                     b()->getInt1Ty(), /*does_not_throw=*/true,
                     /*only_accesses_arg_memory=*/true);
llvm_ir::EmitEarlyReturn(succeeded, b(), GetReturnBlock());
}
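// Returns a pointer to a thread-local buffer slice. Parameter-backed slices
// are loaded from the parameters argument; any other thread-local slice is
// materialized as a function-entry alloca cached per (function, slice) pair.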
llvm::Value* IrEmitter::EmitThreadLocalBufferPointer(
const BufferAllocation::Slice& slice, const Shape& target_shape) {
const BufferAllocation& allocation = *slice.allocation();
llvm::Value* tempbuf_address = [&]() -> llvm::Value* {
auto param_it =
computation_parameter_allocations_.find(slice.allocation()->index());
if (param_it != computation_parameter_allocations_.end()) {
int64_t param_number = param_it->second;
llvm::Value* params = compute_function()->parameters_arg();
llvm::Value* param_address_offset = llvm_ir::EmitBufferIndexingGEP(
params, b()->getPtrTy(), param_number, b());
llvm::LoadInst* param_address_untyped =
Load(b()->getPtrTy(), param_address_offset);
if (!target_shape.IsOpaque()) {
AttachAlignmentMetadataForLoad(param_address_untyped, target_shape);
AttachDereferenceableMetadataForLoad(param_address_untyped,
target_shape);
}
return param_address_untyped;
}
const auto& assigned_buffers = allocation.assigned_buffers();
CHECK_EQ(1, assigned_buffers.size());
const Shape& shape = assigned_buffers.begin()->first->shape();
std::pair<llvm::Function*, BufferAllocation::Slice> key = {
compute_function()->function(), slice};
auto buf_it = thread_local_buffers_.find(key);
if (buf_it == thread_local_buffers_.end()) {
llvm::Value* buffer = llvm_ir::EmitAllocaAtFunctionEntry(
IrShapeType(shape), absl::StrCat("thread_local", slice.ToString()),
b(), MinimumAlignmentForShape(target_shape));
auto it_inserted_pair = thread_local_buffers_.insert({key, buffer});
CHECK(it_inserted_pair.second);
buf_it = it_inserted_pair.first;
}
return buf_it->second;
}();
return tempbuf_address;
}
llvm::Value* IrEmitter::EmitGlobalBufferPointer(
const BufferAllocation::Slice& slice, const Shape& target_shape) {
const BufferAllocation& allocation = *slice.allocation();
llvm::Value* tempbuf_address_ptr = llvm_ir::EmitBufferIndexingGEP(
GetBufferTableArgument(), b()->getPtrTy(), slice.index(), b());
llvm::LoadInst* tempbuf_address_base =
Load(b()->getPtrTy(), tempbuf_address_ptr);
AttachInvariantLoadMetadataForLoad(tempbuf_address_base);
AttachAlignmentMetadataForLoad(tempbuf_address_base, allocation.size());
AttachDereferenceableMetadataForLoad(tempbuf_address_base, allocation.size());
llvm::Value* tempbuf_address_untyped = tempbuf_address_base;
if (slice.offset() > 0) {
tempbuf_address_untyped = InBoundsGEP(
b()->getInt8Ty(), tempbuf_address_base, b()->getInt64(slice.offset()));
}
return tempbuf_address_untyped;
}
llvm::Value* IrEmitter::EmitBufferPointer(const BufferAllocation::Slice& slice,
const Shape& target_shape) {
if (slice.allocation()->is_thread_local()) {
return EmitThreadLocalBufferPointer(slice, target_shape);
} else if (slice.allocation()->is_constant()) {
return FindOrDie(constant_buffer_to_global_, slice.allocation()->index());
} else {
return EmitGlobalBufferPointer(slice, target_shape);
}
}
absl::Status IrEmitter::EmitTargetAddressForOp(const HloInstruction* op) {
const Shape& target_shape = op->shape();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment_.GetUniqueTopLevelSlice(op));
llvm::Value* addr = EmitBufferPointer(slice, target_shape);
addr->setName(IrName(op));
emitted_value_[op] = addr;
return absl::OkStatus();
}
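// Emits a loop computing every element of `target_op` via
// `element_generator`. Tuple-shaped fusion/reduce/reduce-window outputs get
// one IrArray per tuple element plus an emitted tuple of their base pointers;
// everything else gets a single loop over the target array, parallelized when
// ShouldEmitParallelLoopFor(*target_op) holds.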
absl::Status IrEmitter::EmitTargetElementLoop(
const HloInstruction* target_op, absl::string_view desc,
const llvm_ir::ElementGenerator& element_generator,
std::optional<llvm_ir::IrArray> result_array_opt) {
VLOG(2) << "EmitTargetElementLoop: " << target_op->ToString();
llvm_ir::IrArray target_array;
if (result_array_opt.has_value()) {
target_array = result_array_opt.value();
} else {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(target_op));
target_array = GetIrArrayFor(target_op);
}
const Shape& target_shape = target_op->shape();
if (target_shape.IsTuple() &&
(target_op->opcode() == HloOpcode::kFusion ||
target_op->opcode() == HloOpcode::kReduce ||
target_op->opcode() == HloOpcode::kReduceWindow)) {
TF_RET_CHECK(num_dynamic_loop_bounds_ == 0);
std::vector<llvm_ir::IrArray> output_arrays;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(target_shape); ++i) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
assignment_.GetUniqueSlice(target_op, {i}));
const Shape& element_shape = ShapeUtil::GetSubshape(target_shape, {i});
llvm::Value* op_target_address = EmitBufferPointer(slice, element_shape);
llvm::Type* op_target_type = IrShapeType(element_shape);
output_arrays.push_back(
llvm_ir::IrArray(op_target_address, op_target_type, element_shape));
}
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(element_generator, output_arrays, b())
.EmitLoop(IrName(target_op, desc)));
std::vector<llvm::Value*> tuple_operand_ptrs;
tuple_operand_ptrs.reserve(output_arrays.size());
for (int64_t i = 0; i < output_arrays.size(); ++i) {
tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
}
llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, b());
} else {
if (ShouldEmitParallelLoopFor(*target_op)) {
std::vector<std::pair<llvm::Value*, llvm::Value*>> dynamic_loop_bounds =
compute_function()->GetDynamicLoopBounds();
TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, target_array,
&dynamic_loop_bounds, b())
.EmitLoop(IrName(target_op, desc)));
} else {
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(element_generator, target_array, b())
.EmitLoop(IrName(target_op, desc)));
}
}
return absl::OkStatus();
}
absl::Status IrEmitter::EmitMemcpy(const HloInstruction& source,
const HloInstruction& destination) {
llvm::Value* source_value = GetEmittedValueFor(&source);
llvm::Value* destination_value = GetEmittedValueFor(&destination);
int64_t source_size = ByteSizeOf(source.shape());
MemCpy(destination_value, llvm::Align(1), source_value,
llvm::Align(1), source_size);
return absl::OkStatus();
}
absl::Status IrEmitter::ElementTypesSameAndSupported(
const HloInstruction& instruction,
absl::Span<const HloInstruction* const> operands,
absl::Span<const PrimitiveType> supported_types) {
for (auto operand : operands) {
TF_RET_CHECK(
ShapeUtil::SameElementType(operands[0]->shape(), operand->shape()));
}
TF_RET_CHECK(!operands.empty());
PrimitiveType primitive_type = operands[0]->shape().element_type();
if (!absl::c_linear_search(supported_types, primitive_type)) {
return Unimplemented("unsupported operand type %s in op %s",
PrimitiveType_Name(primitive_type),
HloOpcodeString(instruction.opcode()));
}
return absl::OkStatus();
}
absl::Status IrEmitter::DefaultAction(HloInstruction* hlo) {
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (const HloInstruction* operand : hlo->operands()) {
operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {
return GetIrArrayFor(operand).EmitReadArrayElement(index, b());
};
}
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
return EmitTargetElementLoop(
hlo, "elemental_loop",
elemental_emitter.MakeElementGenerator(hlo, operand_to_generator),
std::nullopt);
}
llvm::Value* IrEmitter::EmitScalarReturningThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name) {
std::vector<llvm::Value*> return_value =
      EmitThreadLocalCall(callee, parameters, name, /*is_reducer=*/false);
CHECK_EQ(return_value.size(), 1);
return return_value[0];
}
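// Calls a thread-local computation that returns a scalar or a tuple of
// scalars. Parameters are spilled to entry-block allocas, the callee writes
// into a stack return buffer, and the returned scalars are loaded back out;
// outside the compute function, run options, status, and profile counters are
// passed as null.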
std::vector<llvm::Value*> IrEmitter::EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer, bool in_compute_function) {
CHECK(absl::c_binary_search(thread_local_computations_, &callee));
const Shape& return_shape = callee.root_instruction()->shape();
bool is_scalar_return = ShapeUtil::IsScalar(return_shape);
bool is_tuple_of_scalars_return =
return_shape.IsTuple() &&
absl::c_all_of(return_shape.tuple_shapes(), [&](const Shape& shape) {
return ShapeUtil::IsScalar(shape);
});
CHECK(is_scalar_return || is_tuple_of_scalars_return);
std::vector<llvm::Value*> parameter_addrs;
for (llvm::Value* parameter : parameters) {
CHECK(!parameter->getType()->isPointerTy());
llvm::Value* parameter_addr = llvm_ir::EmitAllocaAtFunctionEntry(
parameter->getType(), "arg_addr", b());
Store(parameter, parameter_addr);
parameter_addrs.push_back(parameter_addr);
}
llvm::Type* return_value_buffer_type =
llvm_ir::ShapeToIrType(return_shape, module_);
std::string retval_alloca_name = absl::StrCat(name, "_return_value_addr");
int retval_alignment =
is_scalar_return
? MinimumAlignmentForPrimitiveType(return_shape.element_type())
: 0;
llvm::AllocaInst* return_value_buffer = llvm_ir::EmitAllocaAtFunctionEntry(
return_value_buffer_type, retval_alloca_name, b(), retval_alignment);
std::vector<llvm::Value*> allocas_for_returned_scalars;
if (is_scalar_return) {
allocas_for_returned_scalars.push_back(return_value_buffer);
} else {
constexpr int max_tuple_size = 1000;
CHECK_LT(return_shape.tuple_shapes_size(), max_tuple_size)
<< "Multivalue function can not return more than 1000 elements to avoid"
<< " stack smashing";
allocas_for_returned_scalars =
llvm_ir::EmitTupleAllocasAtFunctionEntry(return_shape, b());
llvm_ir::IrArray tuple_array(return_value_buffer, return_value_buffer_type,
return_shape);
EmitTuple(tuple_array, allocas_for_returned_scalars, b());
}
llvm::Value* null_ptr = llvm::Constant::getNullValue(b()->getPtrTy());
Call(
FindOrDie(emitted_functions_,
ComputationToEmit{&callee, allow_reassociation_ || is_reducer}),
GetArrayFunctionCallArguments(
parameter_addrs, b(), name,
          /*return_value_buffer=*/return_value_buffer,
          /*exec_run_options_arg=*/
          in_compute_function ? GetExecutableRunOptionsArgument() : null_ptr,
          /*buffer_table_arg=*/null_ptr,
          /*status_arg=*/in_compute_function ? GetStatusArgument() : null_ptr,
          /*profile_counters_arg=*/
          in_compute_function ? GetProfileCountersArgument() : null_ptr));
if (ComputationTransitivelyContainsCustomCall(&callee)) {
DCHECK(!in_compute_function) << "Custom call inside nested computations "
"are not supported by Thunks runtime";
EmitEarlyReturnIfErrorStatus();
}
std::vector<llvm::Value*> returned_scalars;
returned_scalars.reserve(allocas_for_returned_scalars.size());
for (llvm::Value* addr : allocas_for_returned_scalars) {
returned_scalars.push_back(
Load(llvm::cast<llvm::AllocaInst>(addr)->getAllocatedType(), addr));
}
return returned_scalars;
}
void IrEmitter::EmitGlobalCall(const HloComputation& callee,
absl::string_view name) {
CHECK(absl::c_binary_search(global_computations_, &callee));
Call(FindOrDie(emitted_functions_,
ComputationToEmit{&callee, allow_reassociation_}),
GetArrayFunctionCallArguments(
           /*parameter_addresses=*/{}, b(), name,
           /*return_value_buffer=*/
           llvm::Constant::getNullValue(b()->getPtrTy()),
           /*exec_run_options_arg=*/GetExecutableRunOptionsArgument(),
           /*buffer_table_arg=*/GetBufferTableArgument(),
           /*status_arg=*/GetStatusArgument(),
           /*profile_counters_arg=*/GetProfileCountersArgument()));
if (ComputationTransitivelyContainsCustomCall(&callee)) {
EmitEarlyReturnIfErrorStatus();
}
}
llvm::Value* IrEmitter::GetBufferForGlobalCallReturnValue(
const HloComputation& callee) {
const HloInstruction* root_inst = callee.root_instruction();
if (root_inst->opcode() == HloOpcode::kOutfeed) {
return llvm::Constant::getNullValue(b()->getPtrTy());
}
const BufferAllocation::Slice root_buffer =
assignment_.GetUniqueTopLevelSlice(root_inst).value();
return EmitBufferPointer(root_buffer, root_inst->shape());
}
void IrEmitter::BindFusionArguments(const HloInstruction* fusion,
FusedIrEmitter* fused_emitter) {
for (int i = 0; i < fusion->operand_count(); i++) {
const HloInstruction* operand = fusion->operand(i);
fused_emitter->BindGenerator(
*fusion->fused_parameter(i),
[this, operand](llvm_ir::IrArray::Index index) {
return GetIrArrayFor(operand).EmitReadArrayElement(index, b());
});
}
}
}
} | #include "xla/service/cpu/ir_emitter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/ir_function.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/logical_buffer.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
using IrEmitterTest = HloTestBase;
static std::pair<llvm::Function*, llvm::BasicBlock*> CreateFunction(
llvm::LLVMContext& context, llvm::Module* module, llvm::IRBuilder<>* b) {
llvm::PointerType* ptrtype = llvm::PointerType::getUnqual(context);
llvm::FunctionType* ftype = llvm::FunctionType::get(ptrtype, ptrtype, false);
llvm::Function* function = llvm::dyn_cast<llvm::Function>(
module->getOrInsertFunction("func2", ftype).getCallee());
llvm::BasicBlock* return_block =
llvm::BasicBlock::Create(context, "", function);
b->SetInsertPoint(return_block);
[[maybe_unused]] llvm::ReturnInst* ret = b->CreateRet(
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(context)));
return std::make_pair(function, return_block);
}
TEST_F(IrEmitterTest, ComputeFuncStack) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
ROOT %zero = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
const HloInstruction* zero = FindInstruction(hlo.get(), "zero");
ASSERT_NE(zero, nullptr);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssigner::Run(
hlo.get(), std::make_unique<DependencyHloOrdering>(hlo.get()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; }));
TargetMachineFeaturesWithFakeAlignmentLogic target_machine(
[](int64_t size) { return 1; });
IrEmitter ir_emitter(nullptr, *hlo, *buffer_assignment, module.get(), {}, {},
{}, &target_machine, false);
llvm::IRBuilder<>* b = ir_emitter.b();
ASSERT_NE(b, nullptr);
const std::pair<llvm::Function*, llvm::BasicBlock*> fb =
CreateFunction(context, module.get(), b);
llvm::Function* function = fb.first;
llvm::BasicBlock* return_block = fb.second;
ASSERT_NE(function, nullptr);
ASSERT_NE(return_block, nullptr);
const auto funcname = "func1";
const auto linkagetype = llvm::GlobalValue::LinkageTypes::ExternalLinkage;
const HloModuleConfig module_config;
ir_emitter.PushComputeFunction(funcname, linkagetype, module_config,
module.get(), 0);
ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),
funcname);
ir_emitter.PushComputeFunction(b, module.get(), 0, function, nullptr,
return_block);
ASSERT_EQ(ir_emitter.compute_function()->function(), function);
ir_emitter.PopComputeFunction();
ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),
funcname);
ir_emitter.PopComputeFunction();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94482255-ec7d-455f-8827-0a005b4924b8 | cpp | tensorflow/tensorflow | execution_stream_assignment | third_party/xla/xla/service/gpu/execution_stream_assignment.cc | third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc | #include "xla/service/gpu/execution_stream_assignment.h"
#include <deque>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla::gpu {
ExecutionStreamAssignment::ExecutionStreamAssignment(
const HloModule* module, ExecutionStreamAssignmentOptions options) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
ExecutionStreamId next_stream_id = ExecutionStreamId(1);
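// Stream id 0 is reserved for synchronous work reachable from the entry
// computation; asynchronous work is assigned streams starting at 1.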
struct Pending {
Pending(HloComputation* node, ExecutionStreamId stream_id)
: node(node), stream_id(stream_id) {}
HloComputation* node;
ExecutionStreamId stream_id;
};
std::deque<Pending> queue;
queue.emplace_back(module->entry_computation(), ExecutionStreamId(0));
auto enqueue_called_computations = [&](const CallSite& callsite,
ExecutionStreamId stream) {
if (GetInstructionCallContext(callsite.instruction()->opcode()) ==
CallContext::kEmbedded) {
return;
}
for (HloComputation* computation : callsite.called_computations()) {
queue.emplace_back(computation, stream);
}
};
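// Assigns a (source, destination) stream pair to an async instruction,
// handing out destination streams round-robin and wrapping back to 1 once
// `options.number_of_execution_streams` is exceeded.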
auto assign_async_execution_streams =
[&](HloInstruction* instruction, ExecutionStreamId source_stream_id) {
AsyncExecutionStreamIds streams;
streams.source_stream_id = source_stream_id;
streams.destination_stream_id = next_stream_id;
CHECK(async_instructions_.try_emplace(instruction, streams).second);
next_stream_id++;
if (next_stream_id.value() > options.number_of_execution_streams) {
next_stream_id = ExecutionStreamId(1);
}
};
while (!queue.empty()) {
Pending pending = queue.front();
queue.pop_front();
for (HloInstruction* instruction : pending.node->instructions()) {
if (instruction->IsAsynchronous()) continue;
if (instruction->opcode() == HloOpcode::kCopyStart) {
assign_async_execution_streams(instruction, pending.stream_id);
} else {
CHECK(sync_instructions_.try_emplace(instruction, pending.stream_id)
.second);
}
}
for (const CallSite& callsite :
call_graph->GetNode(pending.node).callsites()) {
if (callsite.instruction()->IsAsynchronous()) {
CHECK_EQ(callsite.instruction()->opcode(), HloOpcode::kAsyncStart);
enqueue_called_computations(callsite, next_stream_id);
assign_async_execution_streams(callsite.instruction(),
pending.stream_id);
} else {
enqueue_called_computations(callsite, pending.stream_id);
}
}
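// Async-update/done instructions inherit the stream pair recorded for the
// async-start at the head of their chain.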
for (HloInstruction* instruction : pending.node->instructions()) {
if (!instruction->IsAsynchronous()) continue;
if (instruction->opcode() == HloOpcode::kAsyncStart) {
CHECK(async_instructions_.find(instruction) !=
async_instructions_.end());
} else {
HloInstruction* async_start =
Cast<HloAsyncInstruction>(instruction)->async_chain_start();
AsyncExecutionStreamIds async_start_streams =
async_instructions_.at(async_start);
CHECK(async_instructions_.try_emplace(instruction, async_start_streams)
.second);
}
}
}
}
namespace {
absl::Status StreamNotFoundError(const HloInstruction* instruction) {
return absl::NotFoundError(absl::StrCat(
"No ExecutionStreamId found for ", instruction->ToString(),
"; this may happen if the Computation is not reachable from the module's "
"entrypoint, or if it's only reachable through a embedded calls."));
}
}
absl::StatusOr<ExecutionStreamId>
ExecutionStreamAssignment::GetSyncExecutionStreamId(
const HloInstruction* instruction) const {
CHECK(!instruction->IsAsynchronous());
auto stream = sync_instructions_.find(instruction);
if (stream == sync_instructions_.end()) {
return StreamNotFoundError(instruction);
}
return stream->second;
}
absl::StatusOr<ExecutionStreamAssignment::AsyncExecutionStreamIds>
ExecutionStreamAssignment::GetAsyncExecutionStreamIds(
const HloInstruction* instruction) const {
CHECK(instruction->IsAsynchronous() ||
instruction->opcode() == HloOpcode::kCopyStart);
auto streams = async_instructions_.find(instruction);
if (streams == async_instructions_.end()) {
return StreamNotFoundError(instruction);
}
return streams->second;
}
} | #include "xla/service/gpu/execution_stream_assignment.h"
#include <memory>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
using AsyncExecutionStreamIds =
::xla::gpu::ExecutionStreamAssignment::AsyncExecutionStreamIds;
namespace xla::gpu {
namespace {
class ExecutionStreamAssignmentTest : public HloTestBase {
protected:
void ExpectExecutionStreamForSyncInstructions(
const ExecutionStreamAssignment& assignment, HloComputation* computation,
ExecutionStreamId stream) const {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsAsynchronous()) continue;
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
IsOkAndHolds(stream));
}
}
};
TEST_F(ExecutionStreamAssignmentTest, AsyncFusion) {
const char* kModuleStr = R"(
HloModule m
leaf1 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
leaf2 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
leaf3 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
ENTRY entry {
p0 = f32[2,2] parameter(0)
start1 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=leaf1
start2 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=leaf2
start3 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=leaf3
update1 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start1)
update2 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start2)
update3 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start3)
done1 = f32[2,2] fusion-done(update1)
done2 = f32[2,2] fusion-done(update2)
done3 = f32[2,2] fusion-done(update3)
ROOT done = f32[2,2] custom-call(done1, done2, done3),
custom_call_target="target"
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(
module.get(),
ExecutionStreamAssignmentOptions{2});
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (std::string_view instruction : {"start1", "update1", "done1"}) {
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>(
FindInstruction(module.get(), instruction))),
IsOkAndHolds(AsyncExecutionStreamIds{
ExecutionStreamId(0),
ExecutionStreamId(1)}));
}
for (std::string_view instruction : {"start2", "update2", "done2"}) {
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>(
FindInstruction(module.get(), instruction))),
IsOkAndHolds(AsyncExecutionStreamIds{
ExecutionStreamId(0),
ExecutionStreamId(2)}));
}
for (std::string_view instruction : {"start3", "update3", "done3"}) {
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>(
FindInstruction(module.get(), instruction))),
IsOkAndHolds(AsyncExecutionStreamIds{
ExecutionStreamId(0),
ExecutionStreamId(1)}));
}
ExpectExecutionStreamForSyncInstructions(
assignment,
Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start1"))
->async_wrapped_computation(),
ExecutionStreamId(1));
ExpectExecutionStreamForSyncInstructions(
assignment,
Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start2"))
->async_wrapped_computation(),
ExecutionStreamId(2));
}
TEST_F(ExecutionStreamAssignmentTest, CopyStartStreamIdTest) {
const char* const hlo_copy_start_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_copy_start_string));
ExecutionStreamAssignment assignment(module.get());
for (std::string_view instruction : {"copy-start"}) {
EXPECT_THAT(
assignment.GetAsyncExecutionStreamIds(Cast<HloCopyStartInstruction>(
FindInstruction(module.get(), instruction))),
IsOkAndHolds(AsyncExecutionStreamIds{
ExecutionStreamId(0),
ExecutionStreamId(1)}));
}
}
TEST_F(ExecutionStreamAssignmentTest, FusionComputations) {
const char* kModuleStr = R"(
HloModule m
reduce {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
p0 = f32[4] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=reduce
}
ENTRY entry {
p0 = f32[4] parameter(0)
ROOT done = f32[] fusion(p0), kind=kLoop, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(module.get());
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (std::string_view computation : {"reduce", "fusion"}) {
for (const HloInstruction* instruction :
FindComputation(module.get(), computation)->instructions()) {
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
TEST_F(ExecutionStreamAssignmentTest, UnreachableComputation) {
const char* kModuleStr = R"(
HloModule m
unreachable {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
ENTRY entry {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(module.get());
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (const HloInstruction* instruction :
FindComputation(module.get(), "unreachable")->instructions()) {
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e67453e-51a6-4245-90a4-4e34db3316cd | cpp | tensorflow/tensorflow | hlo_algorithm_denylist | third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc | third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc | #include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/service/gpu/autotuning/gpu_autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/stream_executor/dnn.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
constexpr char kDefaultDenylist[] = R"pb(
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
)pb";
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
ComputeCapability cc, CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo) {
using MapType = absl::flat_hash_map<
std::tuple<std::string, int, int, int, int, int, std::string>,
std::vector<stream_executor::dnn::AlgorithmDesc>>;
static MapType* denylist = [] {
auto* list = new MapType();
AlgorithmDenylist proto;
auto process_denylist = [list](const AlgorithmDenylist& proto) {
for (const auto& entry : proto.entries()) {
for (const auto& algo : entry.algos()) {
(*list)[std::make_tuple(HloStringWithGpuBackendConfig(
entry.hlo(), entry.backend_config()),
entry.cc().major(), entry.cc().minor(),
entry.cudnn_version().major(),
entry.cudnn_version().minor(),
entry.cudnn_version().patch(),
entry.blas_version())]
.emplace_back(algo.id(), algo.tensor_ops(), std::nullopt);
}
}
};
std::string file_path =
GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();
if (!file_path.empty()) {
TF_CHECK_OK(tsl::ReadTextProto(tsl::Env::Default(), file_path, &proto));
process_denylist(proto);
}
CHECK(tsl::protobuf::TextFormat::ParseFromString(
std::string(kDefaultDenylist), &proto));
process_denylist(proto);
return list;
}();
std::vector<stream_executor::dnn::AlgorithmDesc> algorithms;
auto add_matching_disabled_algorithms_to_result = [&](const auto& key) {
auto iter = denylist->find(key);
if (iter != denylist->end()) {
algorithms.insert(algorithms.end(), iter->second.begin(),
iter->second.end());
}
};
auto key = std::make_tuple(hlo, cc.major(), cc.minor(), cudnn_version.major(),
cudnn_version.minor(), cudnn_version.patch(),
blas_version);
add_matching_disabled_algorithms_to_result(key);
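// Retry with an empty BLAS version so entries that omit blas_version match
// regardless of the loaded BLAS library.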
std::get<6>(key) = std::string{};
add_matching_disabled_algorithms_to_result(key);
return algorithms;
}
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
GpuBackendConfig config) {
BackendConfigWrapper backend_config(config);
return absl::StrCat(hlo, ", backend_config=", backend_config.GetRawString());
}
}
} | #include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <cstdlib>
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class DenylistTest : public testing::Test {
protected:
DenylistTest() {
std::string existing_xla_flags;
const char* env = std::getenv("XLA_FLAGS");
if (env != nullptr) {
existing_xla_flags = absl::StrCat(env, " ");
}
tsl::setenv(
"XLA_FLAGS",
absl::StrCat(
existing_xla_flags, "--xla_gpu_algorithm_denylist_path=",
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"data", "hlo_algorithm_denylist.pbtxt"))
.data(),
1);
config_ =
ParseTextProto<GpuBackendConfig>(
"operation_queue_id: 0 wait_on_operation_queues: [] "
"cudnn_conv_backend_config: { activation_mode: kNone "
"conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0} "
"force_earliest_schedule: false")
.value();
}
GpuBackendConfig config_;
};
TEST_F(DenylistTest, DefaultTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{0, true},
stream_executor::dnn::AlgorithmDesc{0, false},
stream_executor::dnn::AlgorithmDesc{1, true},
stream_executor::dnn::AlgorithmDesc{1, false},
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, NegativeTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list =
GetDisabledConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
EXPECT_THAT(list, testing::IsEmpty());
}
TEST_F(DenylistTest, NoBlasVersionSet) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "120301",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, EntryFromHardcodedList) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(9);
cudnn_version.set_minor(0);
cudnn_version.set_patch(0);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target="__cudnn$convBiasActivationForward")",
config_));
EXPECT_THAT(list, testing::ElementsAre(
stream_executor::dnn::AlgorithmDesc{14, false}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0147930e-0d97-4677-8292-daa6da3d7261 | cpp | tensorflow/tensorflow | matmul_utils | third_party/xla/xla/service/gpu/matmul_utils.cc | third_party/xla/xla/service/gpu/matmul_utils_test.cc | #include "xla/service/gpu/matmul_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_blas_lt.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<std::vector<int64_t>> GetNonContractingDims(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> contracting_dims) {
std::vector<int64_t> non_contracting_dims;
for (int64_t dim = 0; dim < shape.rank(); ++dim) {
bool is_batch = absl::c_count(batch_dims, dim) != 0;
bool is_contracting = absl::c_count(contracting_dims, dim) != 0;
TF_RET_CHECK(!(is_batch && is_contracting));
if (!(is_batch || is_contracting)) non_contracting_dims.push_back(dim);
}
TF_RET_CHECK(batch_dims.size() + contracting_dims.size() +
non_contracting_dims.size() ==
shape.rank());
return non_contracting_dims;
}
const tsl::protobuf::RepeatedField<int64_t>& BatchDimensionsForOperand(
const HloInstruction& dot, const int operand_number) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
if (operand_number == 0) {
return dimension_numbers.lhs_batch_dimensions();
}
return dimension_numbers.rhs_batch_dimensions();
}
absl::StatusOr<int64_t> ContractingDimensionIndex(const HloInstruction& dot,
const int operand_number) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
if (operand_number == 0) {
TF_RET_CHECK(dimension_numbers.lhs_contracting_dimensions().size() == 1);
return dimension_numbers.lhs_contracting_dimensions(0);
}
TF_RET_CHECK(dimension_numbers.rhs_contracting_dimensions().size() == 1);
return dimension_numbers.rhs_contracting_dimensions(0);
}
absl::StatusOr<int64_t> NonContractingDimensionIndex(const HloInstruction& dot,
const int operand_number) {
TF_ASSIGN_OR_RETURN(int64_t contracting_dim,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> non_contracting_dims,
GetNonContractingDims(dot.operand(operand_number)->shape(),
BatchDimensionsForOperand(dot, operand_number),
{contracting_dim}));
TF_RET_CHECK(non_contracting_dims.size() == 1);
return non_contracting_dims.front();
}
absl::StatusOr<Shape> GetBatchRowColumnShape(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims) {
TF_RET_CHECK(shape.has_layout());
std::vector<int64_t> minor_to_major;
for (size_t i = 0; i < shape.rank();) {
auto check_physically_sequential =
[&](absl::Span<const int64_t> dims) -> absl::Status {
for (auto it = dims.rbegin(); it != dims.rend(); ++it) {
if (*it != shape.layout().minor_to_major()[i++])
return InvalidArgument("dims not physically_sequential");
}
return absl::OkStatus();
};
int64_t dim = shape.layout().minor_to_major()[i];
if (!row_dims.empty() && dim == row_dims.back()) {
minor_to_major.push_back(1);
TF_RETURN_IF_ERROR(check_physically_sequential(row_dims));
} else if (!col_dims.empty() && dim == col_dims.back()) {
minor_to_major.push_back(2);
TF_RETURN_IF_ERROR(check_physically_sequential(col_dims));
} else if (!batch_dims.empty() && (dim == batch_dims.back())) {
minor_to_major.push_back(0);
TF_RETURN_IF_ERROR(check_physically_sequential(batch_dims));
} else {
return InvalidArgument("dims not physically sequential");
}
}
if (col_dims.empty()) minor_to_major.push_back(2);
if (row_dims.empty()) minor_to_major.push_back(1);
if (batch_dims.empty()) minor_to_major.push_back(0);
auto dim_size = [&](absl::Span<const int64_t> dims) {
return absl::c_accumulate(dims, int64_t{1}, [&](int64_t size, int64_t dim) {
return size * shape.dimensions(dim);
});
};
return ShapeUtil::MakeShapeWithDenseLayout(
shape.element_type(),
{dim_size(batch_dims), dim_size(row_dims), dim_size(col_dims)},
minor_to_major);
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(const Shape& shape) {
TF_RET_CHECK(shape.rank() == 3);
TF_RET_CHECK(shape.has_layout());
int64_t batch_size = shape.dimensions(0);
int64_t num_rows = shape.dimensions(1);
int64_t num_cols = shape.dimensions(2);
Order order{Order::kRowMajor};
int64_t leading_dim_stride = num_cols;
int64_t batch_stride = num_rows * num_cols;
absl::Span<const int64_t> minor_to_major = shape.layout().minor_to_major();
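// Pack the three minor-to-major dimensions into one octal literal: digit k
// (weight 8^k) holds minor_to_major[k], so e.g. {2,1,0} encodes as 012.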
switch (64 * minor_to_major[2] + 8 * minor_to_major[1] + minor_to_major[0]) {
case 012:
break;
case 021:
order = Order::kColumnMajor;
leading_dim_stride = num_rows;
break;
case 0102:
leading_dim_stride = batch_size * num_cols;
batch_stride = num_cols;
break;
case 0201:
order = Order::kColumnMajor;
leading_dim_stride = batch_size * num_rows;
batch_stride = num_rows;
break;
default:
return Unimplemented("batch in most minor dimension");
}
if (batch_size == 1) {
batch_stride = 0;
}
return MatrixLayout{se::gpu::MatrixLayout{shape.element_type(), num_rows,
num_cols, order, batch_size,
leading_dim_stride, batch_stride}};
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims) {
TF_ASSIGN_OR_RETURN(
Shape batch_row_col_shape,
GetBatchRowColumnShape(shape, batch_dims, row_dims, col_dims));
return MatrixLayout::For(batch_row_col_shape);
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(
const Shape& shape, size_t lhs_num_batch_dims, size_t lhs_num_row_dims,
size_t rhs_num_batch_dims, size_t rhs_num_col_dims) {
size_t num_batch_dims = std::max(lhs_num_batch_dims, rhs_num_batch_dims);
TF_RET_CHECK(shape.rank() ==
num_batch_dims + lhs_num_row_dims + rhs_num_col_dims);
std::vector<int64_t> dims(shape.rank());
absl::c_iota(dims, 0);
auto batch_dims = absl::Span<const int64_t>(dims).first(num_batch_dims);
auto row_dims =
absl::Span<const int64_t>(dims).subspan(num_batch_dims, lhs_num_row_dims);
auto col_dims = absl::Span<const int64_t>(dims).last(rhs_num_col_dims);
return MatrixLayout::For(shape, batch_dims, row_dims, col_dims);
}
namespace {
std::vector<int64_t> NormalizedRelativeOrder(absl::Span<const int64_t> dims) {
std::vector<int64_t> indices(dims.size());
absl::c_iota(indices, 0);
absl::c_sort(indices,
[&](int64_t a, int64_t b) { return dims[a] < dims[b]; });
return indices;
}
}
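// A transpose operand can be folded into a dot if it keeps the relative
// order of the non-contracting dimensions and the pre-transpose shape still
// admits a batch/row/column matrix layout.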
absl::StatusOr<bool> CanFoldTransposeOperandIntoDot(const HloInstruction& dot,
int64_t operand_idx) {
if (Cast<HloDotInstruction>(&dot)->sparse_operands()) {
return false;
}
TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
TF_RET_CHECK(dot.operand_count() > operand_idx);
const HloInstruction& transpose = *dot.operand(operand_idx);
TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
auto transposed = [&](const auto& dims) {
std::vector<int64_t> transposed_dims;
transposed_dims.reserve(dims.size());
for (int64_t dim : dims) {
transposed_dims.push_back(transpose.dimensions(dim));
}
return transposed_dims;
};
auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()
: dot_dims.rhs_batch_dimensions();
auto contracting_dims = (operand_idx == 0)
? dot_dims.lhs_contracting_dimensions()
: dot_dims.rhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> non_contracting_dims,
GetNonContractingDims(transpose.shape(), batch_dims, contracting_dims));
auto transposed_non_contracting_dims = transposed(non_contracting_dims);
if (NormalizedRelativeOrder(non_contracting_dims) !=
NormalizedRelativeOrder(transposed_non_contracting_dims)) {
return false;
}
return MatrixLayout::For(transpose.operand(0)->shape(),
transposed(batch_dims), transposed(contracting_dims),
transposed_non_contracting_dims)
.ok();
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& output_shape,
double alpha_real, double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y) {
return GemmConfig::For(lhs_shape, lhs_batch_dims, lhs_contracting_dims,
rhs_shape, rhs_batch_dims, rhs_contracting_dims,
output_shape, nullptr,
output_shape, alpha_real, alpha_imag, beta,
precision_algorithm, algorithm, compute_precision,
grad_x, grad_y);
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& c_shape,
const Shape* bias_shape_ptr, const Shape& output_shape, double alpha_real,
double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y) {
absl::Span<const int64_t> lhs_col_dims = lhs_contracting_dims;
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> lhs_row_dims,
GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_col_dims));
TF_ASSIGN_OR_RETURN(
MatrixLayout lhs_layout,
MatrixLayout::For(lhs_shape, lhs_batch_dims, lhs_row_dims, lhs_col_dims));
absl::Span<const int64_t> rhs_row_dims = rhs_contracting_dims;
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> rhs_col_dims,
GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_row_dims));
TF_ASSIGN_OR_RETURN(
MatrixLayout rhs_layout,
MatrixLayout::For(rhs_shape, rhs_batch_dims, rhs_row_dims, rhs_col_dims));
int64_t num_batch_dims =
std::max(lhs_batch_dims.size(), rhs_batch_dims.size());
TF_RET_CHECK(output_shape.rank() ==
num_batch_dims + lhs_row_dims.size() + rhs_col_dims.size());
std::vector<int64_t> output_dims(output_shape.rank());
absl::c_iota(output_dims, 0);
auto output_batch_dims =
absl::Span<const int64_t>(output_dims).first(num_batch_dims);
auto output_row_dims = absl::Span<const int64_t>(output_dims)
.subspan(num_batch_dims, lhs_row_dims.size());
auto output_col_dims =
absl::Span<const int64_t>(output_dims).last(rhs_col_dims.size());
TF_ASSIGN_OR_RETURN(MatrixLayout output_layout,
MatrixLayout::For(output_shape, output_batch_dims,
output_row_dims, output_col_dims));
Shape c_matrix_shape = c_shape;
if (primitive_util::IsF8Type(lhs_shape.element_type()) &&
primitive_util::IsF8Type(output_shape.element_type()) && beta == 0.0) {
#if GOOGLE_CUDA
c_matrix_shape.set_element_type(
bias_shape_ptr != nullptr ? bias_shape_ptr->element_type() : BF16);
#endif
}
TF_ASSIGN_OR_RETURN(MatrixLayout c_layout,
MatrixLayout::For(c_matrix_shape, output_batch_dims,
output_row_dims, output_col_dims));
if (lhs_shape.element_type() != F8E4M3FN &&
lhs_shape.element_type() != F8E5M2) {
TF_RET_CHECK(lhs_layout.num_cols == rhs_layout.num_rows);
TF_RET_CHECK(output_layout.num_rows == lhs_layout.num_rows);
TF_RET_CHECK(output_layout.num_cols == rhs_layout.num_cols);
}
TF_RET_CHECK(c_layout.num_rows == output_layout.num_rows);
TF_RET_CHECK(c_layout.num_cols == output_layout.num_cols);
TF_RET_CHECK((lhs_layout.batch_size == output_layout.batch_size) ||
(lhs_layout.batch_size == 1));
TF_RET_CHECK((rhs_layout.batch_size == output_layout.batch_size) ||
(rhs_layout.batch_size == 1));
switch (output_shape.element_type()) {
case F8E4M3FN:
case F8E5M2:
case F8E4M3FNUZ:
case F8E5M2FNUZ:
case F16:
case BF16:
case F32:
case F64:
TF_RET_CHECK(alpha_imag == 0);
break;
case C64:
case C128:
break;
case S32:
TF_RET_CHECK(alpha_imag == 0);
if (lhs_layout.dtype != PrimitiveType::S8 ||
rhs_layout.dtype != PrimitiveType::S8) {
return Internal(
"For int32 gemm output only int8 input is supported, got input: "
"%s, %s",
primitive_util::LowercasePrimitiveTypeName(lhs_layout.dtype),
primitive_util::LowercasePrimitiveTypeName(rhs_layout.dtype));
}
break;
default:
return Internal("Unexpected GEMM datatype: %s",
primitive_util::LowercasePrimitiveTypeName(
output_shape.element_type()));
}
return GemmConfig{lhs_layout,
rhs_layout,
c_layout,
output_layout,
{alpha_real, alpha_imag},
beta,
compute_precision,
precision_algorithm,
algorithm,
grad_x,
grad_y};
}
namespace {
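// With ALG_UNSET, operand precision DEFAULT (0) or HIGH (1) permits TF32;
// otherwise the chosen algorithm dictates whether TF32 inputs are allowed.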
bool IsTf32Allowed(PrecisionConfig::Algorithm algorithm,
int64_t compute_precision) {
if (algorithm == PrecisionConfig::ALG_UNSET) {
return compute_precision <= 1;
}
return algorithm_util::HasTf32InputType(algorithm);
}
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const HloInstruction* gemm) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
gemm->backend_config<GpuBackendConfig>());
return For(gemm, gpu_config.gemm_backend_config());
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const HloInstruction* gemm, const GemmBackendConfig& config) {
std::optional<int64_t> algorithm;
if (config.algorithm_case() != GemmBackendConfig::ALGORITHM_NOT_SET) {
algorithm = config.selected_algorithm();
} else {
algorithm = se::blas::kDefaultAlgorithm;
}
const Shape& lhs_shape = gemm->operand(0)->shape();
const Shape& rhs_shape = gemm->operand(1)->shape();
const DotDimensionNumbers& dot_dims = config.dot_dimension_numbers();
const Shape& output_shape =
gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0) : gemm->shape();
bool has_matrix_bias = config.beta() != 0.;
Shape c_shape = has_matrix_bias ? gemm->operand(2)->shape() : output_shape;
std::optional<Shape> vector_bias_shape;
TF_ASSIGN_OR_RETURN(
bool has_vector_bias,
xla::gpu::gpublas_lt::EpilogueAddsVectorBias(config.epilogue()));
if (has_vector_bias) {
int vector_bias_index = has_matrix_bias ? 3 : 2;
if (primitive_util::IsF8Type(lhs_shape.element_type())) {
vector_bias_index += 2;
}
vector_bias_shape = gemm->operand(vector_bias_index)->shape();
}
auto attributes = gemm->frontend_attributes().map();
bool grad_x = (attributes["grad_x"] == "true");
bool grad_y = (attributes["grad_y"] == "true");
int64_t precision = se::blas::kDefaultComputePrecision;
for (auto operand_precision : config.precision_config().operand_precision()) {
precision = std::max(precision, static_cast<int64_t>(operand_precision));
}
const PrecisionConfig::Algorithm precision_algorithm =
config.precision_config().algorithm();
return GemmConfig::For(
lhs_shape, dot_dims.lhs_batch_dimensions(),
dot_dims.lhs_contracting_dimensions(), rhs_shape,
dot_dims.rhs_batch_dimensions(), dot_dims.rhs_contracting_dimensions(),
c_shape,
vector_bias_shape ? &vector_bias_shape.value() : nullptr, output_shape,
config.alpha_real(), config.alpha_imag(), config.beta(),
precision_algorithm, algorithm, precision, grad_x, grad_y);
}
absl::StatusOr<GemmConfig::DescriptorsTuple> GemmConfig::GetMatrixDescriptors(
se::DeviceMemoryBase lhs_buf, se::DeviceMemoryBase rhs_buf,
se::DeviceMemoryBase out_buf) const {
auto create_matrix_desc = [](const se::gpu::MatrixLayout& layout,
se::DeviceMemoryBase data)
-> absl::StatusOr<se::gpu::MatrixDescriptor> {
TF_ASSIGN_OR_RETURN(se::blas::DataType type,
se::gpu::AsBlasDataType(layout.dtype));
return se::gpu::MatrixDescriptor{
data, layout.leading_dim_stride, layout.batch_stride, type,
(layout.order == se::gpu::MatrixLayout::Order::kColumnMajor
? se::blas::Transpose::kNoTranspose
: se::blas::Transpose::kTranspose)};
};
se::gpu::MatrixLayout lhs = lhs_layout, rhs = rhs_layout, out = output_layout;
bool must_swap_operands = MakeOutputColumnMajor(lhs, rhs, out);
if (must_swap_operands) {
std::swap(lhs_buf, rhs_buf);
}
TF_ASSIGN_OR_RETURN(se::gpu::OutputMatrixDescriptor out_desc,
create_matrix_desc(out, out_buf));
out_desc.batch_size = out.batch_size;
out_desc.m = out.num_rows;
out_desc.n = out.num_cols;
out_desc.k = lhs.num_cols;
TF_ASSIGN_OR_RETURN(out_desc.compute_type,
se::gpu::GetBlasComputationType(
PrecisionConfig::ALG_UNSET, lhs.dtype, out.dtype,
se::blas::kDefaultComputePrecision));
TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor lhs_desc,
create_matrix_desc(lhs, lhs_buf));
TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor rhs_desc,
create_matrix_desc(rhs, rhs_buf));
return DescriptorsTuple{lhs_desc, rhs_desc, out_desc, must_swap_operands};
}
namespace {
template <typename Scale, typename Input, typename Output>
absl::Status DoGemmWithAlgorithm(const se::gpu::MatrixDescriptor& lhs,
const se::gpu::MatrixDescriptor& rhs,
const se::gpu::OutputMatrixDescriptor& output,
se::DeviceMemoryBase workspace, Scale alpha,
Scale beta, se::Stream* stream,
PrecisionConfig::Algorithm precision_algorithm,
se::blas::AlgorithmType algorithm,
se::blas::ComputePrecision compute_precision,
const se::NumericOptions& numeric_options,
se::blas::ProfileResult* profile_result,
se::blas::CallContext context) {
CHECK(output.transpose == se::blas::Transpose::kNoTranspose);
PrimitiveType lhs_type = primitive_util::NativeToPrimitiveType<Input>();
PrimitiveType output_type = primitive_util::NativeToPrimitiveType<Output>();
TF_ASSIGN_OR_RETURN(
se::blas::ComputationType computation_type,
se::gpu::GetBlasComputationType(precision_algorithm, lhs_type,
output_type, compute_precision));
se::DeviceMemory<Output> output_data(output.data);
auto* blas = stream->parent()->AsBlas();
if (blas == nullptr) {
return absl::InternalError("No Blas support for stream");
}
se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace);
if (output.batch_size != 1) {
return blas->BlasGemmStridedBatchedWithAlgorithm(
stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,
alpha, lhs.cast<Input>(), lhs.leading_dim_stride, lhs.batch_stride,
rhs.cast<Input>(), rhs.leading_dim_stride, rhs.batch_stride, beta,
&output_data, output.leading_dim_stride, output.batch_stride,
output.batch_size, computation_type, algorithm, numeric_options,
profile_result, context);
} else {
return blas->BlasGemmWithAlgorithm(
stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,
alpha, lhs.cast<Input>(), lhs.leading_dim_stride, rhs.cast<Input>(),
rhs.leading_dim_stride, beta, &output_data, output.leading_dim_stride,
computation_type, algorithm, numeric_options, profile_result, context);
}
}
template <typename Scale, typename Input, typename Output>
absl::Status DoGemm(const se::gpu::MatrixDescriptor& lhs,
const se::gpu::MatrixDescriptor& rhs,
const se::gpu::OutputMatrixDescriptor& output,
se::DeviceMemoryBase workspace, Scale alpha, Scale beta,
se::Stream* stream,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<se::blas::AlgorithmType> algorithm,
se::blas::ComputePrecision compute_precision,
const se::NumericOptions& numeric_options,
se::blas::ProfileResult* profile_result,
se::blas::CallContext context) {
CHECK(output.transpose == se::blas::Transpose::kNoTranspose);
se::DeviceMemory<Output> output_data(output.data);
auto* blas = stream->parent()->AsBlas();
if (blas == nullptr) {
return absl::InternalError("No Blas support for stream");
}
if (algorithm) {
return DoGemmWithAlgorithm<Scale, Input, Output>(
lhs, rhs, output, workspace, alpha, beta, stream, precision_algorithm,
*algorithm, compute_precision, numeric_options, profile_result,
context);
}
se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace);
if (output.batch_size != 1) {
return blas->BlasGemmStridedBatched(
stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,
alpha, lhs.cast<Input>(), lhs.leading_dim_stride, lhs.batch_stride,
rhs.cast<Input>(), rhs.leading_dim_stride, rhs.batch_stride, beta,
&output_data, output.leading_dim_stride, output.batch_stride,
output.batch_size, numeric_options, context);
}
return blas->BlasGemm(stream, lhs.transpose, rhs.transpose, output.m,
output.n, output.k, alpha, lhs.cast<Input>(),
lhs.leading_dim_stride, rhs.cast<Input>(),
rhs.leading_dim_stride, beta, &output_data,
output.leading_dim_stride, numeric_options, context);
}
}
absl::Status RunGemm(const GemmConfig& config, se::DeviceMemoryBase lhs_buffer,
se::DeviceMemoryBase rhs_buffer,
se::DeviceMemoryBase output_buffer,
se::DeviceMemoryBase workspace_buffer,
bool deterministic_ops, se::Stream* stream,
std::optional<se::blas::AlgorithmType> algorithm,
se::blas::ProfileResult* profile_result) {
VLOG(2) << "Executing a GemmThunk";
TF_ASSIGN_OR_RETURN(
GemmConfig::DescriptorsTuple desc,
config.GetMatrixDescriptors(lhs_buffer, rhs_buffer, output_buffer));
se::NumericOptions numeric_options{
deterministic_ops,
IsTf32Allowed(config.precision_algorithm,
config.compute_precision)};
if (!algorithm) algorithm = config.algorithm;
se::blas::CallContext context = se::blas::CallContext::kNone;
if (config.grad_x) {
context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput2
: se::blas::CallContext::kBackpropInput1;
}
if (config.grad_y) {
context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput1
: se::blas::CallContext::kBackpropInput2;
}
std::tuple operand_types{config.lhs_layout.dtype, config.rhs_layout.dtype,
config.output_layout.dtype};
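// If both alpha and beta are zero the product is identically zero, so skip
// the GEMM call and just clear the output buffer.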
if (config.alpha.real() == 0.0 && config.alpha.imag() == 0.0 &&
config.beta == 0.0) {
return stream->MemZero(&output_buffer, output_buffer.size());
}
#define TYPED_GEMM(SCALENTYPE, ATYPE, BTYPE, CTYPE) \
if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) { \
using NativeScaleType = \
primitive_util::PrimitiveTypeToNative<SCALENTYPE>::type; \
using NativeAType = primitive_util::PrimitiveTypeToNative<ATYPE>::type; \
using NativeCType = primitive_util::PrimitiveTypeToNative<CTYPE>::type; \
return DoGemm<NativeScaleType, NativeAType, NativeCType>( \
desc.lhs, desc.rhs, desc.output, workspace_buffer, \
static_cast<NativeScaleType>(config.alpha.real()), \
static_cast<NativeScaleType>(config.beta), stream, \
config.precision_algorithm, algorithm, config.compute_precision, \
numeric_options, profile_result, context); \
}
#define TYPED_GEMM_COMPLEX(SCALENTYPE, ATYPE, BTYPE, CTYPE) \
if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) { \
using NativeScaleType = \
primitive_util::PrimitiveTypeToNative<SCALENTYPE>::type; \
using NativeAType = primitive_util::PrimitiveTypeToNative<ATYPE>::type; \
using NativeCType = primitive_util::PrimitiveTypeToNative<CTYPE>::type; \
return DoGemm<NativeScaleType, NativeAType, NativeCType>( \
desc.lhs, desc.rhs, desc.output, workspace_buffer, \
static_cast<NativeScaleType>(config.alpha), \
static_cast<NativeScaleType>(config.beta), stream, \
config.precision_algorithm, algorithm, config.compute_precision, \
numeric_options, profile_result, context); \
}
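// int32 output is only supported for int8 inputs and always takes the
// explicit-algorithm path, defaulting the algorithm if none was chosen.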
if (config.output_layout.dtype == S32) {
if (!algorithm) algorithm = se::blas::kDefaultGemmAlgo;
return DoGemmWithAlgorithm<int32_t, int8_t, int32_t>(
desc.lhs, desc.rhs, desc.output, workspace_buffer,
static_cast<int32_t>(config.alpha.real()),
static_cast<int32_t>(config.beta), stream, PrecisionConfig::ALG_UNSET,
*algorithm, se::blas::kDefaultComputePrecision, numeric_options,
profile_result, context);
}
TYPED_GEMM(F32, BF16, BF16, BF16)
TYPED_GEMM(F32, F16, F16, F16)
TYPED_GEMM(F32, S8, S8, F32)
TYPED_GEMM(F32, BF16, BF16, F32)
TYPED_GEMM(F32, F16, F16, F32)
TYPED_GEMM(F32, F32, F32, F32)
TYPED_GEMM(F64, F64, F64, F64)
TYPED_GEMM_COMPLEX(C64, C64, C64, C64)
TYPED_GEMM_COMPLEX(C128, C128, C128, C128)
#undef TYPED_GEMM
#undef TYPED_GEMM_COMPLEX
return Internal(
"Unexpected GEMM dtype: %s %s %s",
primitive_util::LowercasePrimitiveTypeName(config.lhs_layout.dtype),
primitive_util::LowercasePrimitiveTypeName(config.rhs_layout.dtype),
primitive_util::LowercasePrimitiveTypeName(config.output_layout.dtype));
}
namespace gpublas_lt {
absl::StatusOr<bool> EpilogueAddsVectorBias(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
case GemmBackendConfig::RELU:
case GemmBackendConfig::GELU:
case GemmBackendConfig::GELU_AUX:
return false;
case GemmBackendConfig::BIAS:
case GemmBackendConfig::BIAS_RELU:
case GemmBackendConfig::BIAS_GELU:
case GemmBackendConfig::BIAS_GELU_AUX:
return true;
default:
return Internal("Unknown Epilogue.");
}
}
absl::StatusOr<bool> EpilogueHasAuxiliaryOutput(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
case GemmBackendConfig::RELU:
case GemmBackendConfig::GELU:
case GemmBackendConfig::BIAS:
case GemmBackendConfig::BIAS_RELU:
case GemmBackendConfig::BIAS_GELU:
return false;
case GemmBackendConfig::GELU_AUX:
case GemmBackendConfig::BIAS_GELU_AUX:
return true;
default:
return Internal("Unknown Epilogue.");
}
}
absl::StatusOr<se::gpu::BlasLt::Epilogue> AsBlasLtEpilogue(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
return se::gpu::BlasLt::Epilogue::kDefault;
case GemmBackendConfig::RELU:
return se::gpu::BlasLt::Epilogue::kReLU;
case GemmBackendConfig::GELU:
return se::gpu::BlasLt::Epilogue::kGELU;
case GemmBackendConfig::GELU_AUX:
return se::gpu::BlasLt::Epilogue::kGELUWithAux;
case GemmBackendConfig::BIAS:
return se::gpu::BlasLt::Epilogue::kBias;
case GemmBackendConfig::BIAS_RELU:
return se::gpu::BlasLt::Epilogue::kBiasThenReLU;
case GemmBackendConfig::BIAS_GELU:
return se::gpu::BlasLt::Epilogue::kBiasThenGELU;
case GemmBackendConfig::BIAS_GELU_AUX:
return se::gpu::BlasLt::Epilogue::kBiasThenGELUWithAux;
default:
return Internal("unexpected epilogue value");
}
}
}
absl::StatusOr<TritonGemmConfig> TritonGemmConfig::FromProto(
const AutotuneResult::TritonGemmKey& proto) {
TF_RET_CHECK(proto.block_m() > 0);
TF_RET_CHECK(proto.block_n() > 0);
TF_RET_CHECK(proto.block_k() > 0);
TF_RET_CHECK(proto.split_k() > 0);
TF_RET_CHECK(proto.num_stages() > 0);
TF_RET_CHECK(proto.num_warps() > 0);
TF_RET_CHECK(proto.num_ctas() > 0);
return TritonGemmConfig(proto.block_m(), proto.block_n(), proto.block_k(),
proto.split_k(), proto.num_stages(),
proto.num_warps(), proto.num_ctas());
}
AutotuneResult::TritonGemmKey TritonGemmConfig::ToProto() const {
AutotuneResult::TritonGemmKey key;
key.set_block_m(block_m);
key.set_block_n(block_n);
key.set_block_k(block_k);
key.set_split_k(split_k);
key.set_num_stages(num_stages);
key.set_num_warps(num_warps);
key.set_num_ctas(num_ctas);
return key;
}
std::string TritonGemmConfig::ToString() const {
return absl::StrCat("{block_m:", block_m, ",block_n:", block_n,
",block_k:", block_k, ",split_k:", split_k,
",num_stages:", num_stages, ",num_warps:", num_warps,
",num_ctas:", num_ctas, "}");
}
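// Uses (lhs_nc + rhs_nc) * contracting, roughly the number of input
// elements touched, as a proxy for the amount of work in the matmul.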
absl::StatusOr<bool> IsMatrixMultiplicationTooSmallForRewriting(
const HloInstruction& dot, int64_t threshold) {
CHECK_EQ(dot.opcode(), HloOpcode::kDot);
const Shape& lhs_shape = dot.operand(0)->shape();
const Shape& rhs_shape = dot.operand(1)->shape();
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
int64_t contracting_size = 1;
for (int64_t dim : dot_dims.lhs_contracting_dimensions()) {
contracting_size *= lhs_shape.dimensions(dim);
}
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> lhs_non_contracting_dims,
GetNonContractingDims(lhs_shape, dot_dims.lhs_batch_dimensions(),
dot_dims.lhs_contracting_dimensions()));
int64_t lhs_non_contracting_size = 1;
for (int64_t dim : lhs_non_contracting_dims) {
lhs_non_contracting_size *= lhs_shape.dimensions(dim);
}
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> rhs_non_contracting_dims,
GetNonContractingDims(rhs_shape, dot_dims.rhs_batch_dimensions(),
dot_dims.rhs_contracting_dimensions()));
int64_t rhs_non_contracting_size = 1;
for (int64_t dim : rhs_non_contracting_dims) {
rhs_non_contracting_size *= rhs_shape.dimensions(dim);
}
return (rhs_non_contracting_size + lhs_non_contracting_size) *
contracting_size <
threshold;
}
bool IsDotSupportedByClassicalEmitters(const HloInstruction& dot) {
if (!algorithm_util::IsSupportedByElementalIrEmitter(
dot.precision_config().algorithm())) {
return false;
}
switch (dot.shape().element_type()) {
case F16:
case F32:
case BF16:
return true;
default:
return false;
}
}
}
} | #include "xla/service/gpu/matmul_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::tsl::testing::IsOkAndHolds;
TEST(GetNonContractingDimsTest, Valid) {
Shape shape = ParseShape("f32[1,2,3,4,5,6]").value();
EXPECT_THAT(GetNonContractingDims(shape, {4},
{1, 5}),
IsOkAndHolds(ElementsAre(0, 2, 3)));
}
using CanFoldTransposeOperandIntoDotTest = HloTestBase;
TEST_F(CanFoldTransposeOperandIntoDotTest, ArgTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule ArgTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[3,2] parameter(0)
y = f32[3,4] parameter(1)
x_transposed = f32[2,3] transpose(x), dimensions={1, 0}
ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchedArgRowColTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule BatchedArgRowColTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[5,3,2] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchRowTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule BatchRowTransposeFoldCheck
ENTRY AddDotsFunc {
x = f32[2,5,3] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest,
BatchFromMinorDimTransposeDoesntFold) {
const char* hlo_text = R"(
HloModule BatchFromMinorDimTransposeDoesntFold
ENTRY AddDotsFunc {
x = f32[3,2,5] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(false));
}
TEST_F(CanFoldTransposeOperandIntoDotTest,
TransposedNonContractingDimsDontFold) {
const char* hlo_text = R"(
HloModule TransposedNonContractingDimsDontFold
ENTRY AddDotsFunc {
x = f32[5,3,4]{2,1,0} parameter(1)
y = f32[5,2,6,3]{3,1,2,0} parameter(0)
y_transposed = f32[5,6,2,3]{3,2,1,0} transpose(y), dimensions={0, 2, 1, 3}
ROOT dot_a = f32[5,4,6,2]{3,2,1,0} dot(x, y_transposed), lhs_contracting_dims={1}, rhs_contracting_dims={3}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 1), IsOkAndHolds(false));
}
struct GetBatchRowColumnShapeTestParams {
absl::string_view shape;
std::vector<int64_t> batch_dims;
std::vector<int64_t> row_dims;
std::vector<int64_t> col_dims;
absl::string_view expected_shape;
};
using GetBatchRowColumnShapeTest =
::testing::TestWithParam<GetBatchRowColumnShapeTestParams>;
TEST_P(GetBatchRowColumnShapeTest, ValidShape) {
const GetBatchRowColumnShapeTestParams& params = GetParam();
Shape shape = ParseShape(params.shape).value();
EXPECT_THAT(GetBatchRowColumnShape(shape, params.batch_dims, params.row_dims,
params.col_dims),
IsOkAndHolds(ParseShape(params.expected_shape).value()));
}
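// Each case is {shape, batch_dims, row_dims, col_dims, expected_shape};
// grouped dimensions are merged, e.g. in the last case batches {0,3} merge
// to 3*6=18, rows {1,4} to 4*7=28, and columns {2,5} to 5*8=40.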
INSTANTIATE_TEST_SUITE_P(
GetBatchRowColumnShapeTests, GetBatchRowColumnShapeTest,
::testing::ValuesIn<GetBatchRowColumnShapeTestParams>({
{"f32[3,4]{1,0}", {}, {0}, {1},
"f32[1,3,4]{2,1,0}"},
{"f32[3,4]{0,1}", {}, {0}, {1}, "f32[1,3,4]{1,2,0}"},
{"f32[3,4]{1,0}", {}, {1}, {0}, "f32[1,4,3]{1,2,0}"},
{"f32[3,4,5]{2,1,0}", {0}, {1}, {2}, "f32[3,4,5]{2,1,0}"},
{"f32[3,4,5]{2,1,0}", {2}, {1}, {0}, "f32[5,4,3]{0,1,2}"},
{"f32[3,4,5,6,7,8]{5,2,4,1,3,0}",
{0, 3},
{1, 4},
{2, 5},
"f32[18,28,40]{2,1,0}"},
}));
TEST(GetBatchRowColumnShapeTest, BatchRowsColsInterleaved) {
Shape shape = ParseShape("f32[3,4,5,6,7,8]{5,4,3,2,1,0}").value();
  auto result =
      GetBatchRowColumnShape(shape, /*batch_dims=*/{0, 3},
                             /*row_dims=*/{1, 4}, /*col_dims=*/{2, 5});
EXPECT_FALSE(result.ok());
}
TEST(GetBatchRowColumnShapeTest, WrongPhysicalOrder) {
Shape shape = ParseShape("f32[3,4,5,6]{3,2,0,1}").value();
  auto result = GetBatchRowColumnShape(shape, /*batch_dims=*/{0, 1},
                                       /*row_dims=*/{2}, /*col_dims=*/{3});
EXPECT_FALSE(result.ok());
}
using Order = MatrixLayout::Order;
struct GetMatrixLayoutTestParams {
absl::string_view shape;
int64_t batch_size;
int64_t num_rows;
int64_t num_cols;
Order order;
int64_t leading_dim_stride;
int64_t batch_stride;
};
using GetMatrixLayoutTest = ::testing::TestWithParam<GetMatrixLayoutTestParams>;
TEST_P(GetMatrixLayoutTest, ValidShape) {
const GetMatrixLayoutTestParams& params = GetParam();
Shape shape = ParseShape(params.shape).value();
MatrixLayout result = MatrixLayout::For(shape).value();
EXPECT_EQ(result.batch_size, params.batch_size);
EXPECT_EQ(result.num_rows, params.num_rows);
EXPECT_EQ(result.num_cols, params.num_cols);
EXPECT_EQ(result.order, params.order);
EXPECT_EQ(result.leading_dim_stride, params.leading_dim_stride);
EXPECT_EQ(result.batch_stride, params.batch_stride);
}
INSTANTIATE_TEST_SUITE_P(
GetMatrixLayoutTests, GetMatrixLayoutTest,
::testing::ValuesIn<GetMatrixLayoutTestParams>({
{"f32[3,4,5]{2,1,0}", 3, 4, 5,
Order::kRowMajor, 5,
20},
{"f32[3,4,5]{1,2,0}", 3, 4, 5, Order::kColumnMajor, 4, 20},
{"f32[3,4,5]{2,0,1}", 3, 4, 5, Order::kRowMajor, 15, 5},
{"f32[3,4,5]{1,0,2}", 3, 4, 5, Order::kColumnMajor, 12, 4},
}));
TEST(GetMatrixLayoutTest, BatchInMostMinorPhysicalDimension) {
Shape shape = ParseShape("f32[3,4,5]{0,2,1}").value();
EXPECT_FALSE(MatrixLayout::For(shape).ok());
}
using GetMatrixSizeRewriteThresholdTest = HloTestBase;
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTooSmallForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[100,30,3] parameter(0)
y = f32[100,3,3] parameter(1)
ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(true));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulSupportedByClassicalEmitters) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[100,30,3] parameter(0)
y = f32[100,3,3] parameter(1)
ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_TRUE(IsDotSupportedByClassicalEmitters(*dot));
}
TEST_F(GetMatrixSizeRewriteThresholdTest,
MatMulUnsupportedByClassicalEmitters) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = s8[100,30,3] parameter(0)
y = s8[100,3,3] parameter(1)
ROOT dot = s32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_FALSE(IsDotSupportedByClassicalEmitters(*dot));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulLeftLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[50,2] parameter(0)
y = f32[2,2] parameter(1)
ROOT dot = f32[50,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulRightLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[2,2] parameter(0)
y = f32[2,50] parameter(1)
ROOT dot = f32[2,50] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTogetherLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[4,16] parameter(0)
y = f32[16,4] parameter(1)
ROOT dot = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e5b458c-749f-4599-ab52-44f385a4c5f4 | cpp | tensorflow/tensorflow | stream_executor_util | third_party/xla/xla/service/gpu/stream_executor_util.cc | third_party/xla/xla/service/gpu/stream_executor_util_test.cc | #include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec) {
if (!stream_exec) {
return absl::InvalidArgumentError("StreamExecutor is null");
}
stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn();
if (!dnn) {
return absl::FailedPreconditionError(
"DNN library initialization failed. Look at the errors above for more "
"details.");
}
return dnn->GetVersion();
}
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version) {
return GetDnnVersionInfo(stream_exec).value_or(fallback_version);
}
namespace {
using se::dnn::DataLayout;
using se::dnn::DataLayoutString;
using se::dnn::FilterLayout;
using se::dnn::FilterLayoutString;
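// Returns the smallest dimension number not present in `vals`; used below to
// pick the dimension for the trailing vector part of vectorized layouts.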
int64_t FindMissingDnum(absl::Span<const int64_t> vals) {
for (int i = 0; i < vals.size(); i++) {
if (!absl::c_linear_search(vals, i)) {
return i;
}
}
return vals.size();
}
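// Translates a cuDNN DataLayout into an XLA layout (built in major-to-minor
// order) using the convolution's dimension numbers. The *YX4 / *YX32
// variants append an extra most-minor dimension for the vector part.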
absl::StatusOr<Layout> DataLayoutToXlaLayout(
DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension,
absl::Span<int64_t const> spatial_dimensions) {
std::vector<int64_t> layout;
switch (data_layout) {
case DataLayout::kBatchDepthYX:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
break;
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(FindMissingDnum(layout));
break;
case DataLayout::kBatchYXDepth:
layout.push_back(batch_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(feature_dimension);
break;
default:
return Internal("Invalid layout %s", DataLayoutString(data_layout));
}
return LayoutUtil::MakeLayoutFromMajorToMinor(layout);
}
}
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
DataLayout input, FilterLayout filter,
DataLayout output) {
TF_ASSIGN_OR_RETURN(
Layout input_layout,
DataLayoutToXlaLayout(input, dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()));
  TF_ASSIGN_OR_RETURN(
      Layout output_layout,
      DataLayoutToXlaLayout(output, dnums.output_batch_dimension(),
                            dnums.output_feature_dimension(),
                            dnums.output_spatial_dimensions()));
std::vector<int64_t> filter_layout;
switch (filter) {
case FilterLayout::kOutputInputYX:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
break;
case FilterLayout::kOutputInputYX4:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(FindMissingDnum(filter_layout));
break;
case FilterLayout::kOutputYXInput:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
break;
default:
return Internal("Invalid filter layout %s for conv with dnums %s,",
FilterLayoutString(filter),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout,
LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout),
output_layout);
}
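// Inverse of StreamExecutorConvLayoutsToXlaLayouts: maps the concrete XLA
// layouts of a convolution's input/filter/output back to stream-executor
// enum layouts. Vectorized layouts are distinguished by the size of the
// most-minor dimension, which must be 4 or 32.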
absl::StatusOr<std::tuple<DataLayout, FilterLayout, DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
CHECK(input.has_layout());
CHECK(filter.has_layout());
CHECK(output.has_layout());
Layout nchw_input, nchw_filter, nchw_output;
std::tie(nchw_input, nchw_filter, nchw_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX,
FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX)
.value();
Layout nchw_vect_input, nchw_vect_filter, nchw_vect_output;
std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4,
FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4)
.value();
Layout nhwc_input, nhwc_filter, nhwc_output;
std::tie(nhwc_input, nhwc_filter, nhwc_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth,
FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth)
.value();
DataLayout input_layout;
if (LayoutUtil::Equal(input.layout(), nchw_input)) {
input_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) {
int64_t vect_size = input.dimensions(input.layout().minor_to_major(0));
if (vect_size == 4) {
input_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
input_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid input shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(input),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(input.layout(), nhwc_input)) {
input_layout = DataLayout::kBatchYXDepth;
} else {
return Internal(
"Invalid input layout %s for conv with dnums %s; expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(input.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(),
nchw_vect_input.ToString(), nhwc_input.ToString());
}
FilterLayout filter_layout;
if (LayoutUtil::Equal(filter.layout(), nchw_filter)) {
filter_layout = FilterLayout::kOutputInputYX;
} else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) {
int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0));
if (vect_size == 4) {
filter_layout = FilterLayout::kOutputInputYX4;
} else if (vect_size == 32) {
filter_layout = FilterLayout::kOutputInputYX32;
} else {
return Internal(
"Invalid filter shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(filter),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) {
filter_layout = FilterLayout::kOutputYXInput;
} else {
return Internal(
"Invalid filter layout %s for conv with dnums %s, expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(filter.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(),
nchw_vect_filter.ToString(), nhwc_filter.ToString());
}
DataLayout output_layout;
if (LayoutUtil::Equal(output.layout(), nchw_output)) {
output_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) {
int64_t vect_size = output.dimensions(output.layout().minor_to_major(0));
if (vect_size == 4) {
output_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
output_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid output shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(output),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(output.layout(), nhwc_output)) {
output_layout = DataLayout::kBatchYXDepth;
} else {
return Internal("Invalid output layout %s for conv with dnums %s",
LayoutUtil::HumanString(output.layout()),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout, filter_layout, output_layout);
}
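// Returns the one dimension in [0, rank) that is neither `d0`, `d1`, nor in
// `ds` -- the vectorized feature dimension, if the layout has one.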
static std::optional<int64_t> FindVectorizedDim(int64_t rank, int64_t d0,
int64_t d1,
absl::Span<const int64_t> ds) {
for (int64_t i = 0; i < rank; i++) {
if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) {
continue;
}
return i;
}
return std::nullopt;
}
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
return {
FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
FindVectorizedDim(filter.dimensions_size(),
dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
FindVectorizedDim(
output.dimensions_size(), dnums.output_batch_dimension(),
dnums.output_feature_dimension(), dnums.output_spatial_dimensions()),
};
}
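// Returns a process-wide mutex keyed on (platform, device ordinal), created
// lazily on first use and never destroyed.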
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) {
static absl::Mutex mu(absl::kConstInit);
static auto* mutexes =
new std::map<std::pair<const se::Platform*, int64_t>,
absl::Mutex>();
absl::MutexLock global_lock(&mu);
auto it = mutexes
->emplace(std::piecewise_construct,
std::make_tuple(stream_exec->GetPlatform(),
stream_exec->device_ordinal()),
std::make_tuple())
.first;
return it->second;
}
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes) {
se::MultiKernelLoaderSpec loader_spec(num_args);
loader_spec.AddCudaPtxInMemory(ptx, kernel_name);
if (!cubin_data.empty()) {
loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name);
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
stream_exec->LoadKernel(loader_spec));
se::KernelMetadata m;
m.set_shared_memory_bytes(shared_mem_bytes);
kernel->set_metadata(m);
return kernel;
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
kernel, *kernel_args);
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
cluster_dim, kernel, *kernel_args);
}
template <typename T, typename Generator>
typename std::enable_if<std::is_integral<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) =
delete;
template <typename T, typename Generator>
typename std::enable_if<std::is_floating_point<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) {
return std::uniform_real_distribution<T>(lhs, rhs)(*gen);
}
namespace repeat_buffer_kernel {
void* kernel();
}
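// Fills `buffer` with pseudo-random values of type T. A fixed host-side pool
// of 10069 random elements is generated once per type; it is copied to the
// device starting at offset *rng_state (wrapping around), and on CUDA any
// remaining bytes are filled by replicating the pool on-device via
// RepeatBufferKernel.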
template <typename T>
static void InitializeTypedBuffer(se::Stream* stream,
se::DeviceMemoryBase buffer,
int64_t* rng_state) {
constexpr int host_buffer_size = 10069;
static std::vector<T>* host_buffer = [&] {
auto* ret = new std::vector<T>(host_buffer_size);
std::mt19937 gen;
for (auto& element : *ret) {
constexpr bool kIsIntegral = std::numeric_limits<T>::is_integer;
constexpr bool kIsLowRange =
!kIsIntegral && std::numeric_limits<T>::max_exponent <=
std::numeric_limits<Eigen::half>::max_exponent;
using RandomType = typename std::conditional<std::is_same_v<T, double>,
double, float>::type;
auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0);
auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen);
element = T(kIsIntegral ? rand_val + 0.5 : rand_val);
}
return ret;
}();
CHECK_EQ(0, buffer.size() % sizeof(T));
int64_t elements_to_fill = buffer.size() / sizeof(T);
int64_t host_index = *rng_state;
CHECK_LT(host_index, host_buffer_size);
*rng_state = (*rng_state + elements_to_fill) % host_buffer_size;
int64_t first_size =
std::min<int64_t>(host_buffer_size - host_index, elements_to_fill);
TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index,
first_size * sizeof(T)));
elements_to_fill -= first_size;
if (elements_to_fill == 0) {
return;
}
int64_t second_size = std::min<int64_t>(host_index, elements_to_fill);
CHECK_LE(first_size + second_size, host_buffer_size);
se::DeviceMemoryBase mem =
buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T));
TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size()));
elements_to_fill -= second_size;
if (elements_to_fill == 0) {
return;
}
#ifdef GOOGLE_CUDA
CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size);
se::StreamExecutor* executor = stream->parent();
auto kernel =
se::TypedKernelFactory<se::DeviceMemoryBase, int64_t, int64_t>::Create(
executor, "RepeatBufferKernel", repeat_buffer_kernel::kernel());
if (!kernel.ok()) {
LOG(FATAL) << "Could not create RepeatBufferKernel: " << kernel.status();
}
constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T);
constexpr int threads_per_block = 256;
constexpr int blocks_per_grid =
(host_buffer_bytes + threads_per_block - 1) / threads_per_block;
TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1),
se::BlockDim(blocks_per_grid, 1, 1), *kernel,
buffer, host_buffer_bytes,
static_cast<int64_t>(buffer.size())));
#endif
}
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer) {
return primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant) ||
primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<NativeT>(stream, buffer, rng_state);
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<typename NativeT::value_type>(
stream, buffer, rng_state);
}
if constexpr (primitive_type_constant == PRED) {
return InitializeTypedBuffer<int8_t>(stream, buffer, rng_state);
}
LOG(FATAL) << "Unexpected type: "
<< primitive_util::LowercasePrimitiveTypeName(buffer_type);
},
buffer_type);
}
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind) {
switch (kind) {
case CudnnConvKind::kBackwardFilter:
return se::dnn::BACKWARD_FILTER;
case CudnnConvKind::kBackwardInput:
return se::dnn::BACKWARD_DATA;
case CudnnConvKind::kForward:
return se::dnn::FORWARD;
case CudnnConvKind::kForwardActivation:
return se::dnn::FORWARD_BIAS_ACTIVATION;
case CudnnConvKind::kForwardGraph:
return se::dnn::FORWARD_GRAPH;
default:
break;
}
return Internal("Unexpected convolution kind");
}
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind) {
switch (kind) {
case CudnnNormKind::kLayerForwardInfer:
return se::dnn::LAYER_FWD_INFER;
case CudnnNormKind::kLayerForwardTrain:
return se::dnn::LAYER_FWD_TRAIN;
case CudnnNormKind::kLayerBackward:
return se::dnn::LAYER_BWD;
default:
return Internal("Unexpected norm kind");
}
}
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind) {
switch (kind) {
case CudnnfMHAMaskKind::kNoMask:
return se::dnn::NO_MASK;
case CudnnfMHAMaskKind::kPadding:
return se::dnn::PADDING;
case CudnnfMHAMaskKind::kCausal:
return se::dnn::CAUSAL;
case CudnnfMHAMaskKind::kPaddingCausal:
return se::dnn::PADDING_CAUSAL;
case CudnnfMHAMaskKind::kAlibi:
return se::dnn::ALIBI;
default:
return Internal("Unexpected fmha mask kind");
}
}
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type) {
switch (type) {
case F16:
return se::dnn::ToDataType<Eigen::half>::value;
case F32:
return se::dnn::ToDataType<float>::value;
case F64:
return se::dnn::ToDataType<double>::value;
case S8:
return se::dnn::ToDataType<int8_t>::value;
case S32:
return se::dnn::ToDataType<int32_t>::value;
case BF16:
return se::dnn::ToDataType<Eigen::bfloat16>::value;
case F8E4M3FN:
return se::dnn::ToDataType<tsl::float8_e4m3fn>::value;
case F8E5M2:
return se::dnn::ToDataType<tsl::float8_e5m2>::value;
default:
break;
}
return Internal("Unsupported datatype");
}
bool RequireDeterminism(const HloModuleConfig& config) {
return config.debug_options().xla_gpu_deterministic_ops() ||
config.debug_options().xla_gpu_exclude_nondeterministic_ops();
}
namespace {
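// Keeps results that either succeeded or failed only with WRONG_RESULT;
// presumably such candidates are still runnable and merely disagreed
// numerically with the reference.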
std::vector<AutotuneResult> KeepNonFailures(
absl::Span<AutotuneResult const> profile_results) {
std::vector<AutotuneResult> filtered_results;
absl::c_copy_if(profile_results, std::back_inserter(filtered_results),
[](const AutotuneResult& r) {
return !r.has_failure() ||
r.failure().kind() == AutotuneResult::WRONG_RESULT;
});
return filtered_results;
}
absl::Status AllAlgorithmsFailedInternalError(
std::optional<std::string_view> instr_str,
absl::Span<AutotuneResult const> profile_results) {
std::ostringstream msg;
if (instr_str.has_value()) {
msg << "All algorithms tried for " << instr_str.value()
<< " failed. Falling back to default algorithm. Per-algorithm "
"errors:";
} else {
msg << "All algorithms failed. Falling back to the default algorithm. "
<< "Per-algorithm errors:";
}
for (const auto& result : profile_results) {
msg << "\n " << result.failure().msg();
}
return Internal("%s", msg.str());
}
absl::Status NoAlgorithmSuppliedInternalError(
std::optional<std::string_view> instr_str) {
std::ostringstream msg;
if (instr_str.has_value()) {
msg << "There are no algorithm candidates for computing: \n "
<< instr_str.value()
<< "\nThis likely means that the instruction shape is not supported by "
"the target GPU library.";
} else {
msg << "There are no algorithm candidates for computing the instruction.\n"
"This likely means that the instruction shape is not supported by "
"the target GPU library.";
}
return Internal("%s", msg.str());
}
void SortAutotuningResultsByRunTime(std::vector<AutotuneResult>& results) {
absl::c_sort(results,
[](const AutotuneResult& lhs, const AutotuneResult& rhs) {
return tsl::proto_utils::FromDurationProto(lhs.run_time()) <
tsl::proto_utils::FromDurationProto(rhs.run_time());
});
}
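// Returns the leading results whose run time is within a fixed 4us
// measurement error of the fastest entry; expects the input to be sorted by
// run time.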
absl::Span<AutotuneResult const> TopResultsWithinMeasurementError(
std::vector<AutotuneResult>& results_sorted_by_runtime) {
constexpr absl::Duration kMeasurementError = absl::Microseconds(4);
absl::Duration min_time = tsl::proto_utils::FromDurationProto(
results_sorted_by_runtime.front().run_time());
absl::Duration limit_time = min_time + kMeasurementError;
auto limit_time_it = absl::c_find_if(
results_sorted_by_runtime, [limit_time](const AutotuneResult& x) {
return tsl::proto_utils::FromDurationProto(x.run_time()) > limit_time;
});
return absl::MakeSpan(&*results_sorted_by_runtime.begin(), &*limit_time_it);
}
}
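// Picks the best autotuning result: failed candidates are filtered out; if
// determinism is required, the first surviving candidate is returned as-is;
// otherwise the fastest results within measurement error of each other are
// tie-broken by smallest scratch allocation.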
absl::StatusOr<AutotuneResult> PickBestResult(
absl::Span<AutotuneResult const> profile_results,
std::optional<std::string_view> instr_str,
HloModuleConfig hlo_module_config) {
if (profile_results.empty()) {
return NoAlgorithmSuppliedInternalError(instr_str);
}
std::vector<AutotuneResult> filtered_results =
KeepNonFailures(profile_results);
if (filtered_results.empty()) {
return AllAlgorithmsFailedInternalError(instr_str, profile_results);
}
if (RequireDeterminism(hlo_module_config)) {
return *filtered_results.begin();
}
SortAutotuningResultsByRunTime(filtered_results);
auto top_within_error = TopResultsWithinMeasurementError(filtered_results);
return *absl::c_min_element(top_within_error, [](const AutotuneResult& lhs,
const AutotuneResult& rhs) {
return lhs.scratch_bytes() < rhs.scratch_bytes();
});
}
}
} | #include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "xla/autotuning.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tsl/util/proto/proto_utils.h"
namespace xla::gpu {
namespace {
struct Result {
int64_t run_time_ns;
int64_t scratch_bytes;
bool operator==(const Result& other) const {
return other.run_time_ns == run_time_ns &&
other.scratch_bytes == scratch_bytes;
};
explicit operator AutotuneResult() const {
AutotuneResult result;
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns));
result.set_scratch_bytes(scratch_bytes);
return result;
}
};
static Result ATRToResult(AutotuneResult atr) {
return Result{.run_time_ns = absl::ToInt64Nanoseconds(
tsl::proto_utils::FromDurationProto(atr.run_time())),
.scratch_bytes = atr.scratch_bytes()};
}
std::vector<AutotuneResult> Results(const std::vector<Result>& stats) {
std::vector<AutotuneResult> results;
for (const auto& s : stats) results.push_back(AutotuneResult(s));
return results;
}
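// Covers PickBestResult: the clear winner is picked by run time, and results
// within the ~4us measurement-error window are tie-broken by scratch bytes.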
TEST(StreamExecutorTest, PickBestResult) {
absl::StatusOr<AutotuneResult> atr;
atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0}));
atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
adb8539d-0c2b-41c4-b736-da697e10d787 | cpp | tensorflow/tensorflow | gpu_compiler | third_party/xla/xla/service/gpu/gpu_compiler.cc | third_party/xla/xla/service/gpu/gpu_compiler_test.cc | #include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SplitModule.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/maybe_owning.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/all_gather_broadcast_reorder.h"
#include "xla/service/all_gather_combiner.h"
#include "xla/service/all_reduce_combiner.h"
#include "xla/service/all_reduce_contiguous.h"
#include "xla/service/all_reduce_folder.h"
#include "xla/service/all_reduce_promotion.h"
#include "xla/service/all_reduce_reassociate.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/batched_gather_scatter_normalizer.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/bitcast_dtypes_expander.h"
#include "xla/service/broadcast_canonicalizer.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_permute_decomposer.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/collective_quantizer.h"
#include "xla/service/collectives_schedule_linearizer.h"
#include "xla/service/comparison_expander.h"
#include "xla/service/compiler.h"
#include "xla/service/conditional_canonicalizer.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "xla/service/convert_mover.h"
#include "xla/service/convolution_4d_expander.h"
#include "xla/service/convolution_pred_expander.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/cpu_gpu_shape_verifier.h"
#include "xla/service/dot_decomposer.h"
#include "xla/service/dot_merger.h"
#include "xla/service/dump.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/dynamic_index_splitter.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/eigh_expander.h"
#include "xla/service/executable.h"
#include "xla/service/export_hlo.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/conv_layout_normalization.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/execution_stream_assignment.h"
#include "xla/service/gpu/fusion_pipeline.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include "xla/service/gpu/hlo_fusion_stats.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_unnested.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/runtime_intrinsics.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "xla/service/gpu/transforms/algorithm_checker.h"
#include "xla/service/gpu/transforms/all_gather_optimizer.h"
#include "xla/service/gpu/transforms/all_reduce_blueconnect.h"
#include "xla/service/gpu/transforms/all_reduce_splitter.h"
#include "xla/service/gpu/transforms/async_collective_annotator.h"
#include "xla/service/gpu/transforms/async_wrapper.h"
#include "xla/service/gpu/transforms/collective_permute_cycle_decomposer.h"
#include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h"
#include "xla/service/gpu/transforms/command_buffer_scheduling.h"
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include "xla/service/gpu/transforms/cudnn_custom_call_converter.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/transforms/dot_dimension_sorter.h"
#include "xla/service/gpu/transforms/dot_operand_converter.h"
#include "xla/service/gpu/transforms/double_buffer_loop_unrolling.h"
#include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h"
#include "xla/service/gpu/transforms/fusion_block_level_rewriter.h"
#include "xla/service/gpu/transforms/fusion_wrapper.h"
#include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/transforms/gemv_rewriter.h"
#include "xla/service/gpu/transforms/layout_assignment.h"
#include "xla/service/gpu/transforms/move_copy_to_users.h"
#include "xla/service/gpu/transforms/pipelined_p2p_rewriter.h"
#include "xla/service/gpu/transforms/reduce_scatter_creator.h"
#include "xla/service/gpu/transforms/reduction_degenerate_dim_remover.h"
#include "xla/service/gpu/transforms/reduction_dimension_grouper.h"
#include "xla/service/gpu/transforms/reduction_layout_normalizer.h"
#include "xla/service/gpu/transforms/reduction_splitter.h"
#include "xla/service/gpu/transforms/rename_fusions.h"
#include "xla/service/gpu/transforms/sanitize_constant_names.h"
#include "xla/service/gpu/transforms/scatter_expander.h"
#include "xla/service/gpu/transforms/scatter_slice_simplifier.h"
#include "xla/service/gpu/transforms/softmax_rewriter_triton.h"
#include "xla/service/gpu/transforms/stream_attribute_annotator.h"
#include "xla/service/gpu/transforms/stream_attribute_async_wrapper.h"
#include "xla/service/gpu/transforms/topk_specializer.h"
#include "xla/service/gpu/transforms/topk_splitter.h"
#include "xla/service/gpu/transforms/transpose_dimension_grouper.h"
#include "xla/service/gpu/transforms/tree_reduction_rewriter.h"
#include "xla/service/gpu/transforms/triton_fusion_numerics_verifier.h"
#include "xla/service/gpu/transforms/windowed_einsum_handler.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_computation_deduplicator.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_rematerialization.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_transfer_asyncifier.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/host_offloader.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/logistic_expander.h"
#include "xla/service/operand_upcaster.h"
#include "xla/service/optimization_barrier_expander.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/qr_expander.h"
#include "xla/service/real_imag_expander.h"
#include "xla/service/reduce_decomposer.h"
#include "xla/service/reduce_scatter_combiner.h"
#include "xla/service/reduce_scatter_reassociate.h"
#include "xla/service/reduce_window_rewriter.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/result_caster.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/service/sharding_remover.h"
#include "xla/service/simplify_fp_conversions.h"
#include "xla/service/slice_sinker.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/service/stochastic_convert_decomposer.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/service/topk_rewriter.h"
#include "xla/service/transpose_folding.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/service/zero_sized_hlo_elimination.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef PLATFORM_GOOGLE
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#endif
namespace xla {
namespace gpu {
namespace {
using MaybeOwningThreadPool = MaybeOwning<tsl::thread::ThreadPool>;
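// Interprets `parallelism` as: 0 = use `default_thread_pool`, or create one
// with `default_parallelism` threads if none was provided; 1 = no thread
// pool (run on the caller's thread); N > 1 = create an owned pool of N.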
MaybeOwningThreadPool CreateMaybeOwningThreadPool(
int parallelism, tsl::thread::ThreadPool* default_thread_pool,
int default_parallelism) {
CHECK_GE(parallelism, 0);
CHECK_GE(default_parallelism, 1);
CHECK(default_thread_pool == nullptr ||
default_thread_pool->CurrentThreadId() == -1);
auto create_thread_pool = [&](int num_threads) {
CHECK_GE(num_threads, 1);
return std::make_unique<tsl::thread::ThreadPool>(tsl::Env::Default(), "",
num_threads);
};
switch (parallelism) {
case 0:
if (default_thread_pool == nullptr && default_parallelism > 1) {
return MaybeOwningThreadPool(create_thread_pool(default_parallelism));
}
return MaybeOwningThreadPool(default_thread_pool);
case 1:
return MaybeOwningThreadPool(nullptr);
default:
return MaybeOwningThreadPool(create_thread_pool(parallelism));
}
}
absl::StatusOr<AutotuneConfig> GetAutotuneConfig(
se::StreamExecutor* stream_exec, const DebugOptions& debug_options,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
if (stream_exec) {
return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},
debug_options};
}
return AutotuneConfig{DevicelessConfig{gpu_target_config.device_description},
debug_options};
}
se::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {
return stream_exec->GetDeviceDescription().gpu_compute_capability();
}
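// AOT compilation result for the thunk-based GPU runtime. It bundles the
// optimized HLO module, its buffer assignment, the compiled PTX/binary, and
// any cuDNN-compiled graphs into a CompilationResultProto for serialization.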
class GpuThunkAotCompilationResult : public AotCompilationResult {
public:
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromModule(const HloModule* hlo_module,
const BufferAssignment* buffer_assignment,
std::string_view asm_text, absl::Span<const uint8_t> binary,
const BinaryMap& dnn_compiled_graphs) {
CompilationResultProto proto;
*proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();
*proto.mutable_buffer_assignment() = buffer_assignment->ToProto();
proto.set_asm_text(std::string(asm_text));
proto.set_binary(binary.data(), binary.size());
proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),
dnn_compiled_graphs.cend());
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(hlo_module->Clone(),
std::move(proto)));
}
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromString(const std::string& serialized) {
CompilationResultProto proto;
if (!proto.ParseFromString(serialized)) {
return Internal(
"Failed to parse serialized GpuThunkAotCompilationResult.");
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));
}
absl::StatusOr<std::string> SerializeAsString() const override {
return proto_.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Executable>> LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const override;
const HloModule* optimized_module() const override { return module_.get(); }
std::unique_ptr<HloModule> consume_optimized_module() override {
return std::move(module_);
}
private:
GpuThunkAotCompilationResult(std::unique_ptr<HloModule> module,
CompilationResultProto proto)
: module_(std::move(module)), proto_(std::move(proto)) {}
std::unique_ptr<HloModule> module_;
CompilationResultProto proto_;
};
}
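// Rebuilds a GpuExecutable from the serialized proto: deserializes the HLO
// module and buffer assignment, re-runs the IR emitter over the entry
// computation to regenerate the thunk sequence, and reuses the stored
// PTX/binary instead of recompiling.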
absl::StatusOr<std::unique_ptr<Executable>>
GpuThunkAotCompilationResult::LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),
compiler->BufferSizeBytesFunction(),
                                  /*can_share_buffer=*/nullptr));
ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());
std::vector<uint8_t> binary(proto_.binary().begin(), proto_.binary().end());
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithId(compiler->PlatformId()));
std::string platform_name = platform->Name();
const se::DeviceDescription& gpu_device_info =
stream_exec->GetDeviceDescription();
mlir::DialectRegistry registry;
auto mlir_context = std::make_unique<mlir::MLIRContext>(registry);
llvm::LLVMContext llvm_context;
auto* gpu_compiler = dynamic_cast<GpuCompiler*>(compiler);
if (gpu_compiler == nullptr) {
return Internal("Compiler is not a GpuCompiler.");
}
auto llvm_module = std::make_unique<llvm::Module>("", llvm_context);
llvm_module->setTargetTriple(gpu_compiler->target_triple());
llvm_module->setDataLayout(gpu_compiler->data_layout());
IrEmitterContext ir_emitter_context(
hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,
platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),
      /*llvm_module_constants=*/nullptr,
      /*emit_kernels=*/false);
absl::string_view cache_file_path =
hlo_module->config().debug_options().xla_gpu_kernel_cache_file();
if (!cache_file_path.empty() &&
hlo_module->config()
.debug_options()
.xla_gpu_enable_llvm_module_compilation_parallelism()) {
TF_RETURN_IF_ERROR(LoadCache(ir_emitter_context, cache_file_path));
}
auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);
TF_RETURN_IF_ERROR(
ir_emitter->EmitHloComputation(hlo_module->entry_computation()));
std::vector<GpuExecutable::ConstantInfo> constants =
std::move(ir_emitter_context.constants());
TF_ASSIGN_OR_RETURN(auto output_info,
GetOutputInfo(*hlo_module, *buffer_assignment));
const Shape& output_shape = hlo_module->result_shape();
int64_t debug_buffer_assignment_show_max =
hlo_module->config()
.debug_options()
.xla_debug_buffer_assignment_show_max();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GpuExecutable> executable,
GpuExecutable::Create(GpuExecutable::Params{
proto_.asm_text(),
binary,
BinaryMap(proto_.dnn_compiled_graphs().cbegin(),
proto_.dnn_compiled_graphs().cend()),
gpu_device_info.gpu_compute_capability(),
ir_emitter->ConsumeThunkSequence(),
std::move(constants),
std::move(output_info),
std::move(hlo_module->name()),
std::move(output_shape),
std::nullopt,
std::move(buffer_assignment),
debug_buffer_assignment_show_max,
std::move(hlo_module),
true}));
return executable;
}
GpuCompiler::GpuCompiler(se::Platform::Id platform_id,
const char* target_triple, const char* data_layout)
: platform_id_(platform_id),
target_triple_(target_triple),
data_layout_(data_layout),
      pointer_size_(llvm::DataLayout(data_layout)
                        .getPointerSize(/*AddressSpace=*/0)) {}
namespace {
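// Registers an HloVerifier as an invariant checker on `pipeline`; when
// `debug_only` is set it uses AddInvariantCheckerDebug, so the check only
// runs in debug builds.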
void AddHloVerifier(HloPassPipeline* pipeline,
bool verify_unique_channel_ids = false,
HloVerifierOpts&& opts = {}, bool debug_only = false) {
opts.verify_unique_channel_ids = verify_unique_channel_ids;
std::unique_ptr<TargetVerifierMetadata> verifier_metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
if (debug_only) {
pipeline->AddInvariantCheckerDebug<HloVerifier>(
std::move(verifier_metadata), "hlo verifier (debug)");
} else {
pipeline->AddInvariantChecker<HloVerifier>(std::move(verifier_metadata),
"hlo verifier");
}
}
void CheckNotScheduled(HloModule* hlo_module) {
if (hlo_module->has_schedule() &&
!hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {
LOG(WARNING) << "\nThe current HLO module " << hlo_module->name()
<< " is scheduled and optimized. \n"
<< "It is not expected to run optimization passes again.\n"
"Use a test method like RunAndCompareNoHloPasses() or "
<< "the xla_disable_all_hlo_passes flag.";
}
}
void LogDebugOptions(HloModule* hlo_module) {
XLA_VLOG_LINES(
1, absl::StrFormat("GpuCompilationEnvironment of hlo_module %s:\n%s",
hlo_module->name(),
hlo_module->config().debug_options().DebugString()));
}
AlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(
const HloModuleConfig& hlo_module_config,
const Compiler::TargetConfig& gpu_target_config,
AlgebraicSimplifierOptions opts_from_compiler) {
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
opts_from_compiler;
layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(
ConvRewriter::ConvIsLowerable);
layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(
hlo_module_config.debug_options()
.xla_gpu_enable_dot_strength_reduction());
layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);
layout_insensitive_algsimp_opts.set_minmax_propagate_nan(
!hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());
layout_insensitive_algsimp_opts
.set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);
if (gpu_target_config.platform_name == "ROCM") {
layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);
}
layout_insensitive_algsimp_opts
.set_enable_unconditional_reduce_of_concat_replacement(false);
return layout_insensitive_algsimp_opts;
}
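// Canonicalization passes that must run before SPMD partitioning:
// gather/scatter normalization, cuDNN custom-call conversion, call inlining,
// and TopK decomposition/rewriting, among others.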
absl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {
HloPassPipeline pre_spmd_pipeline("pre-spmd-partitioner");
pre_spmd_pipeline.AddPass<BatchedGatherScatterNormalizer>();
pre_spmd_pipeline.AddPass<CuDnnCustomCallConverter>();
pre_spmd_pipeline.AddPass<ConvertMemoryPlacementToInternalAnnotations>();
pre_spmd_pipeline.AddPass<CallInliner>();
pre_spmd_pipeline.AddPass<ZeroSizedHloElimination>();
pre_spmd_pipeline.AddPass<ConditionalCanonicalizer>();
pre_spmd_pipeline.AddPass<TopkDecomposer>([&](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kTopK;
});
pre_spmd_pipeline.AddPass<TopkRewriter>(
[](const HloSortInstruction*, int64_t) { return true; });
return pre_spmd_pipeline.Run(hlo_module).status();
}
absl::Status RunSPMDPasses(
HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
bool auto_sharding = hlo_module->config().use_auto_spmd_partitioning();
#ifndef PLATFORM_GOOGLE
if (auto_sharding) {
LOG(ERROR) << "GPU autosharding is not yet available in open source.";
}
#endif
const int64_t num_partitions = hlo_module->config().num_partitions();
if (num_partitions > 1) {
if (!hlo_module->config().use_spmd_partitioning()) {
return InvalidArgument(
"num_partitions=%d but SPMD partitioning not enabled.",
num_partitions);
}
HloPassPipeline spmd_pipeline("spmd-partitioner");
AddSPMDPasses(
hlo_module, layout_insensitive_algsimp_opts,
gpu_target_config.device_description.gpu_compute_capability(),
spmd_pipeline,
#ifdef PLATFORM_GOOGLE
[&](HloPassPipeline& pipeline) {
if (auto_sharding) {
AutoShardingOption option;
option.enable = true;
if (!hlo_module->config()
.auto_spmd_partitioning_mesh_shape()
.empty()) {
option.device_mesh_shape =
hlo_module->config().auto_spmd_partitioning_mesh_shape();
} else {
option.device_mesh_shape = {
gpu_target_config.device_description.core_count(), 1};
}
if (!hlo_module->config()
.auto_spmd_partitioning_mesh_ids()
.empty()) {
option.device_mesh_ids =
hlo_module->config().auto_spmd_partitioning_mesh_ids();
}
option.memory_budget_per_device =
hlo_module->config()
.debug_options()
.xla_gpu_auto_spmd_partitioning_memory_budget_gb() *
1024 * 1024 * 1024;
option.memory_budget_ratio =
hlo_module->config()
.debug_options()
.xla_gpu_auto_spmd_partitioning_memory_budget_ratio();
spmd_pipeline.AddPass<AutoSharding>(option);
}
});
#else
std::nullopt);
#endif
if (hlo_module->config()
.debug_options()
.xla_gpu_unsafe_pipelined_loop_annotator()) {
spmd_pipeline.AddPass<WhileLoopTripCountAnnotator>();
spmd_pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
}
return spmd_pipeline.Run(hlo_module).status();
} else {
HloPassPipeline sharding_removal_pipeline("sharding-removal");
sharding_removal_pipeline.AddPass<ShardingRemover>();
sharding_removal_pipeline.AddPass<HloDCE>();
return sharding_removal_pipeline.Run(hlo_module).status();
}
}
absl::Status RunOptimizationPasses(
HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline pipeline("optimization");
AddHloVerifier(&pipeline,
!debug_options.xla_experimental_ignore_channel_id());
if (debug_options.xla_gpu_multi_streamed_windowed_einsum()) {
pipeline.AddPass<WindowedEinsumHandler>();
}
pipeline.AddPass<TopKSplitter>();
pipeline.AddPass<TopkSpecializer>();
pipeline.AddPass<TopkDecomposer>();
HloPredicate upcaster_filter = [&](const HloInstruction* instr) {
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(
&gpu_target_config.device_description.gpu_compute_capability());
if (cuda_cc != nullptr &&
!cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) {
return true;
}
return !gpu::IsMatrixMultiplication(*instr);
};
pipeline.AddPass<DotDimensionSorter>();
pipeline.AddPass<DotDecomposer>();
pipeline.AddPass<ResultCaster>(upcaster_filter);
pipeline.AddPass<OperandUpcaster>(upcaster_filter);
pipeline.AddPass<DotOperandConverter>();
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
pipeline.AddPass<RngExpander>();
pipeline.AddPass<RngBitGeneratorExpander>(RandomAlgorithm::RNG_PHILOX);
pipeline.AddPass<ComparisonExpander>(std::array{std::make_pair(BF16, F32)});
pipeline.AddPass<ZeroSizedHloElimination>();
if (RequireDeterminism(hlo_module->config())) {
pipeline.AddPass<ScatterExpander>(
ScatterExpander::kEliminateIndeterministicScatters);
}
pipeline.AddPass<GpuScatterExpander>();
pipeline.AddPass<QrExpander>();
pipeline.AddPass<EighExpander>();
pipeline.AddPass<DynamicIndexSplitter>();
pipeline.AddPass<CallInliner>();
pipeline.AddPass<StochasticConvertDecomposer>();
pipeline.AddPass<Convolution4DExpander>();
pipeline.AddPass<ConvolutionPredExpander>();
pipeline.AddPass<StableSortExpander>();
pipeline.AddPass<BatchNormExpander>(
      /*rewrite_training_op=*/true,
      /*rewrite_inference_op=*/true,
      /*rewrite_grad_op=*/true);
pipeline.AddPass<LogisticExpander>();
pipeline.AddPass<ConditionalCanonicalizer>();
pipeline.AddPass<DynamicDimensionSimplifier>();
if (debug_options.xla_reduce_window_rewrite_base_length() != 0) {
pipeline.AddPass<HloPassFix<ReduceWindowRewriter>>(
debug_options.xla_reduce_window_rewrite_base_length());
}
DynamicPadderOptions dynamic_padder_options;
switch (debug_options.xla_gpu_shape_checks()) {
case DebugOptions::IGNORE:
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kIgnore;
break;
case DebugOptions::RUNTIME: {
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kRuntime;
dynamic_padder_options.assertion_generator = [&](HloInstruction* inst) {
auto created = Cast<HloCustomCallInstruction>(
inst->parent()->AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeTokenShape(), {inst}, kXlaGpuAssertCustomCallTag,
"Buffers have different size at runtime",
API_VERSION_STATUS_RETURNING)));
created->set_custom_call_has_side_effect(true);
};
break;
}
case DebugOptions::COMPILE_TIME:
dynamic_padder_options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
break;
default:
LOG(FATAL) << "Unreachable";
}
pipeline.AddPass<DynamicPadder>(dynamic_padder_options);
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
[&, &pipeline =
pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification")] {
    AddHloVerifier(&pipeline,
                   !debug_options.xla_experimental_ignore_channel_id(),
                   HloVerifierOpts{}, /*debug_only=*/true);
pipeline.AddPass<ZeroSizedHloElimination>();
pipeline.AddPass<GatherSimplifier>();
pipeline.AddPass<GatherExpander>(GatherExpander::kEliminateSimpleGathers);
pipeline.AddPass<ScatterSimplifier>();
pipeline.AddPass<ScatterExpander>(
ScatterExpander::kEliminateSimpleScatters);
pipeline.AddPass<ScatterSliceSimplifier>();
pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
gpu_version);
pipeline.AddPass<BitcastDtypesExpander>();
pipeline.AddPass<DotDimensionSorter>();
pipeline.AddPass<DotDecomposer>();
    pipeline.AddPass<DotMerger>(
        /*max_size_to_merge=*/int64_t{
            debug_options.xla_gpu_dot_merger_threshold_mb()}
        << 20);
pipeline.AddPass<SortSimplifier>();
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<WhileLoopConstantSinking>();
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<SliceSinker>();
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
pipeline.AddPass<ReshapeMover>(reshape_mover_options);
pipeline.AddPass<HloConstantFolding>();
pipeline.AddPass<ConditionalSimplifier>();
pipeline.AddPass<RealImagExpander>();
pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot);
    pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/false);
pipeline.AddPass<HloDCE>();
}();
[&, &pipeline =
pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification-2")] {
pipeline.AddPass<ConvertMover>();
pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
gpu_version);
}();
  pipeline.AddPass<HloComputationDeduplicator>(
      /*mark_fusion_duplications=*/false);
return pipeline.Run(hlo_module).status();
}
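// Appends CollectivePipeliner passes that move all-reduce (forward),
// all-gather (backward) and reduce-scatter (forward) ops across while-loop
// iterations, gated on the corresponding xla_gpu_enable_pipelined_* flags.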
absl::Status AddCollectivePipelinerPasses(
const DebugOptions& debug_options, HloPassPipeline& collectives_pipeline) {
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_all_reduce()) {
    CollectivePipeliner::Config config{
        /*level_to_operate_on=*/0,
        /*max_pipelining_per_loop=*/INT64_MAX,
        /*last_run=*/true,
        /*pipeline_use_tree=*/false,
        /*process_different_sized_ops=*/true,
        /*pipelining_direction=*/CollectivePipeliner::PipeliningDirection::kForward,
        /*should_process=*/HloPredicateIsOp<HloOpcode::kAllReduce>,
        /*acceptable_formatting=*/HloPredicateTrue,
        /*reuse_pipelined_op_buffer=*/HloPredicateFalse};
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_all_gather()) {
    CollectivePipeliner::Config config{
        /*level_to_operate_on=*/0,
        /*max_pipelining_per_loop=*/INT64_MAX,
        /*last_run=*/true,
        /*pipeline_use_tree=*/false,
        /*process_different_sized_ops=*/true,
        /*pipelining_direction=*/CollectivePipeliner::PipeliningDirection::kBackward,
        /*should_process=*/HloPredicateIsOp<HloOpcode::kAllGather>,
        /*acceptable_formatting=*/HloPredicateTrue,
        /*reuse_pipelined_op_buffer=*/HloPredicateFalse,
        /*should_allow_loop_variant_parameter_in_chain=*/HloPredicateFalse,
        /*should_allow_control_dependencies=*/false,
        /*postprocess_backward_peeled_op=*/std::nullopt,
        /*postprocess_backward_rotated_op=*/std::nullopt,
        /*should_add_loop_invariant_op_in_chain=*/true,
    };
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
if (debug_options.xla_gpu_enable_pipelined_collectives() ||
debug_options.xla_gpu_enable_pipelined_reduce_scatter()) {
    CollectivePipeliner::Config config{
        /*level_to_operate_on=*/0,
        /*max_pipelining_per_loop=*/INT64_MAX,
        /*last_run=*/true,
        /*pipeline_use_tree=*/false,
        /*process_different_sized_ops=*/true,
        /*pipelining_direction=*/CollectivePipeliner::PipeliningDirection::kForward,
        /*should_process=*/HloPredicateIsOp<HloOpcode::kReduceScatter>,
        /*acceptable_formatting=*/HloPredicateTrue,
        /*reuse_pipelined_op_buffer=*/HloPredicateFalse};
collectives_pipeline.AddPass<CollectivePipeliner>(config);
}
return absl::OkStatus();
}
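// Re-runs the collective pipeliner after layout assignment when
// xla_gpu_run_post_layout_collective_pipeliner is set; otherwise this is a
// no-op pipeline.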
absl::Status RunPostLayoutCollectivePipelinerPasses(HloModule* hlo_module) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline collectives_pipeline("collective-pipeliner-optimizations");
if (debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
TF_RETURN_IF_ERROR(
AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
collectives_pipeline.AddPass<FlattenCallGraph>();
}
return collectives_pipeline.Run(hlo_module).status();
}
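// Collective-specific optimizations: folding and reassociating all-reduces,
// creating reduce-scatters, decomposing collective-permutes, and (unless
// deferred to the post-layout run) pipelining collectives across loop
// iterations.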
absl::Status RunCollectiveOptimizationPasses(
HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
se::GpuComputeCapability gpu_version) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
HloPassPipeline collectives_pipeline("collective-optimizations");
collectives_pipeline.AddPass<AllReduceFolder>();
collectives_pipeline.AddPass<AllReduceSplitter>();
collectives_pipeline.AddPass<AllGatherOptimizer>();
collectives_pipeline.AddPass<AllReduceReassociate>(
debug_options.xla_gpu_enable_reassociation_for_converted_ar());
collectives_pipeline.AddPass<ReduceScatterReassociate>();
  collectives_pipeline.AddPass<WhileLoopAllReduceCodeMotion>(
      /*enable_reduce_scatter=*/debug_options
          .xla_gpu_enable_while_loop_reduce_scatter_code_motion());
if (!debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
TF_RETURN_IF_ERROR(
AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
}
collectives_pipeline.AddPass<ReduceScatterCreator>();
collectives_pipeline.AddPass<CollectivePermuteCycleDecomposer>(
hlo_module->config()
.debug_options()
.xla_gpu_collective_permute_decomposer_threshold());
collectives_pipeline.AddPass<CollectivePermuteDecomposer>(
hlo_module->config()
.debug_options()
.xla_gpu_collective_permute_decomposer_threshold());
if (hlo_module->config()
.debug_options()
.xla_gpu_enable_pipelined_collectives() ||
hlo_module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
AddP2PPipeliner(collectives_pipeline);
}
collectives_pipeline.AddPass<GpuAlgebraicSimplifier>(
layout_insensitive_algsimp_opts, gpu_version);
collectives_pipeline.AddPass<AllGatherBroadcastReorder>();
const std::pair<PrimitiveType, PrimitiveType> ar_promoted_types[] = {
{U16, U32}, {S16, S32}};
collectives_pipeline.AddPass<AllReducePromotion>(ar_promoted_types);
collectives_pipeline.AddPass<HloDCE>();
collectives_pipeline.AddPass<CollectiveQuantizer>();
collectives_pipeline.AddPass<HloDCE>();
collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
return collectives_pipeline.Run(hlo_module).status();
}
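// Assigns layouts to every instruction, normalizes sub-byte element sizes,
// and legalizes host-offloaded values; must run before any layout-sensitive
// pass.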
absl::Status RunLayoutAssignmentPasses(HloModule* hlo_module,
se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version) {
HloPassPipeline pipeline("layout assignment");
pipeline.AddPass<FlattenCallGraph>();
ChannelLayoutConstraints layout_constraints;
pipeline.AddPass<GpuLayoutAssignment>(
hlo_module->mutable_entry_computation_layout(), gpu_version, dnn_version,
&layout_constraints);
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
  pipeline.AddPass<OptimizeInputOutputBufferAlias>(
      /*registered_buffer_donor_only=*/true);
  pipeline.AddPass<HostOffloadLegalize>(
      static_cast<int64_t>(stream_executor::MemoryType::kHost),
      /*after_layout=*/true);
return pipeline.Run(hlo_module).status();
}
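// Runs the main (vertical) fusion pipeline followed by horizontal fusion;
// optionally attaches cost-model statistics to the fused module.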
absl::Status RunFusionPasses(HloModule* hlo_module,
const Compiler::TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool,
HloCostAnalysis::ShapeSizeFunction shape_size_fn) {
const se::DeviceDescription& gpu_device_info =
gpu_target_config.device_description;
TF_RETURN_IF_ERROR(FusionPipeline(hlo_module->config().debug_options(),
shape_size_fn, thread_pool, gpu_device_info)
.Run(hlo_module)
.status());
if (hlo_module->config().debug_options().xla_gpu_collect_cost_model_stats()) {
    GpuHloCostAnalysis::Options cost_analysis_options{
        shape_size_fn,
        /*per_second_rates=*/{},
        /*min_latencies_seconds=*/{},
        /*count_multiple_input_accesses=*/true};
HloPassPipeline post_fusion_analysis("post_fusion_analysis");
post_fusion_analysis.AddPass<GpuCostModelStatsCollection>(
gpu_device_info, cost_analysis_options);
TF_RETURN_IF_ERROR(post_fusion_analysis.Run(hlo_module).status());
}
TF_RETURN_IF_ERROR(
HorizontalFusionPipeline(gpu_device_info).Run(hlo_module).status());
if (VLOG_IS_ON(2)) {
HloFusionStatsVisitor stats;
TF_RETURN_IF_ERROR(hlo_module->entry_computation()->Accept(&stats));
VLOG(2) << stats.ToString();
}
return absl::OkStatus();
}
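// Picks a while-loop unroll strategy (double buffering, full unroll, or
// heuristic auto-unroll) from the debug options and appends the matching
// loop-unrolling passes. Full unroll overrides double buffering.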
void AddDoubleBufferingPasses(const DebugOptions& opts,
HloPassPipeline& pipeline) {
std::optional<DoubleBufferLoopUnrolling::UnrollStrategy> unroll_strategy =
std::nullopt;
if (opts.xla_gpu_enable_while_loop_double_buffering()) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_DOUBLE_BUFFER) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_FULL_UNROLL) {
LOG_IF(WARNING, unroll_strategy != std::nullopt)
<< "Overriding double buffering set via "
"`xla_gpu_enable_while_loop_double_buffering` flag.";
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll;
}
if (opts.xla_gpu_enable_while_loop_unrolling() ==
DebugOptions::WHILE_LOOP_UNROLLING_AUTO_UNROLL &&
opts.xla_gpu_enable_heuristic_pass_configuration() &&
!opts.xla_gpu_enable_while_loop_double_buffering()) {
unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kAuto;
}
if (unroll_strategy != std::nullopt) {
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<DoubleBufferLoopUnrolling>(*unroll_strategy);
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
}
}
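// Post-fusion optimizations: combines small collectives into bigger ones,
// makes combined all-reduce buffers contiguous, applies custom-kernel
// replacements, and sets up while-loop double buffering.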
absl::Status RunPostFusionPasses(
HloModule* hlo_module,
std::function<absl::Status(HloPassPipeline*, const DebugOptions&)>
add_custom_kernel_replacement_passes) {
const DebugOptions& opts = hlo_module->config().debug_options();
HloPassPipeline pipeline("post-fusion optimization");
pipeline.AddPass<RenameFusions>();
  pipeline.AddPass<AllGatherCombiner>(
      opts.xla_gpu_all_gather_combine_threshold_bytes(),
      /*combine_threshold_count=*/256,
      opts.xla_gpu_enable_all_gather_combine_by_dim());
  pipeline.AddPass<AllReduceCombiner>(
      opts.xla_gpu_all_reduce_combine_threshold_bytes(),
      /*combine_threshold_count=*/256);
  pipeline.AddPass<ReduceScatterCombiner>(
      opts.xla_gpu_reduce_scatter_combine_threshold_bytes(),
      /*combine_threshold_count=*/256,
      opts.xla_gpu_enable_reduce_scatter_combine_by_dim());
pipeline.AddPass<AllReduceContiguous>();
TF_RETURN_IF_ERROR(add_custom_kernel_replacement_passes(&pipeline, opts));
int32_t blueconnect_num_devices_per_host =
hlo_module->config()
.debug_options()
.xla_gpu_all_reduce_blueconnect_num_devices_per_host();
if (blueconnect_num_devices_per_host > 0) {
pipeline.AddPass<AllReduceBlueConnect>(blueconnect_num_devices_per_host);
}
AddDoubleBufferingPasses(opts, pipeline);
return pipeline.Run(hlo_module).status();
}
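// Converts collectives to their asynchronous start/done form, then annotates
// which ones may actually execute asynchronously, honoring
// xla_gpu_disable_async_collectives.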
absl::Status RunPostFusionCollectiveOptimizationPasses(HloModule* hlo_module) {
HloPassPipeline pipeline("post-fusion-collectives optimization");
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
config.convert_collective_broadcast = HloPredicateTrue;
config.convert_collective_permute = HloPredicateTrue;
config.convert_all_gather = HloPredicateTrue;
config.convert_reduce_scatter = HloPredicateTrue;
config.convert_all_to_all = HloPredicateTrue;
pipeline.AddPass<AsyncCollectiveCreator>(std::move(config));
absl::flat_hash_set<DebugOptions::CollectiveOpType> disabled_async_ops;
for (auto collective_op_type : hlo_module->config()
.debug_options()
.xla_gpu_disable_async_collectives()) {
disabled_async_ops.insert(
static_cast<DebugOptions::CollectiveOpType>(collective_op_type));
}
auto convert_to_async = [&disabled_async_ops](const HloInstruction* inst) {
switch (inst->opcode()) {
case HloOpcode::kAllReduceStart:
return !disabled_async_ops.contains(DebugOptions::ALLREDUCE);
case HloOpcode::kCollectivePermuteStart:
return !disabled_async_ops.contains(DebugOptions::COLLECTIVEPERMUTE);
case HloOpcode::kAllGatherStart:
return !disabled_async_ops.contains(DebugOptions::ALLGATHER);
case HloOpcode::kAsyncStart: {
auto async_inst = Cast<HloAsyncInstruction>(inst);
switch (async_inst->async_wrapped_opcode()) {
case HloOpcode::kCollectiveBroadcast:
return !disabled_async_ops.contains(
DebugOptions::COLLECTIVEBROADCAST);
case HloOpcode::kReduceScatter:
return !disabled_async_ops.contains(DebugOptions::REDUCESCATTER);
case HloOpcode::kAllToAll:
return !disabled_async_ops.contains(DebugOptions::ALLTOALL);
default:
return false;
}
}
default:
return false;
}
};
pipeline.AddPass<AsyncCollectiveAnnotator>(convert_to_async);
return pipeline.Run(hlo_module).status();
}
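// Layout-sensitive algebraic simplification and computation deduplication
// after fusion; also annotates stream attributes for multi-streamed
// windowed einsum.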
absl::Status RunPostFusionSimplificationPasses(
HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
se::GpuComputeCapability gpu_version) {
HloPassPipeline pipeline("post-fusion-simplification-pipeline optimization");
AlgebraicSimplifierOptions options = layout_insensitive_algsimp_opts;
options.set_is_layout_sensitive(true);
pipeline.AddPass<GpuAlgebraicSimplifier>(options, gpu_version);
  pipeline.AddPass<HloComputationDeduplicator>(
      /*mark_fusion_duplications=*/true);
if (hlo_module->config()
.debug_options()
.xla_gpu_multi_streamed_windowed_einsum()) {
pipeline.AddPass<StreamAttributeAnnotator>();
pipeline.AddPass<StreamAttributeAsyncWrapper>();
}
return pipeline.Run(hlo_module).status();
}
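// Optionally checks the numerics of Triton fusions against a reference
// implementation when xla_gpu_verify_triton_fusion_numerics is enabled.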
absl::Status RunPostFusionVerificationPasses(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
HloPassPipeline pipeline("post-fusion-verification-pipeline optimization");
if (hlo_module->config()
.debug_options()
.xla_gpu_verify_triton_fusion_numerics()) {
TF_ASSIGN_OR_RETURN(
AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, hlo_module->config().debug_options(),
options, gpu_target_config));
pipeline.AddPass<TritonFusionNumericsVerifier>(autotune_config);
}
return pipeline.Run(hlo_module).status();
}
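// Rewrites the module into normalized (descending) layouts: decomposes
// reshapes, moves copies to users, then runs LayoutNormalization followed by
// a fixed-point algebraic simplification.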
absl::Status RunLayoutNormalizationPasses(
HloModule* hlo_module, const se::GpuComputeCapability& gpu_version) {
HloPassPipeline layout_normalization_pipeline("layout normalization");
const DebugOptions& debug_options = hlo_module->config().debug_options();
AlgebraicSimplifierOptions opts =
GpuCompiler::GetAlgebraicSimplifierOptions(hlo_module->config());
opts.set_supports_non_canonical_dots(false);
opts.set_is_layout_sensitive(true);
opts.set_enable_conv_operand_swap(false);
opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());
opts.set_enable_unconditional_reduce_of_concat_replacement(false);
layout_normalization_pipeline.AddPass<ReshapeDecomposer>();
layout_normalization_pipeline.AddPass<HloPassFix<MoveCopyToUsers>>();
layout_normalization_pipeline.AddPass<LayoutNormalization>(
&NormalizeLayoutForGpuCustomCalls);
layout_normalization_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
opts, gpu_version);
layout_normalization_pipeline.AddPass<BroadcastCanonicalizer>();
layout_normalization_pipeline.AddPass<ScatterSimplifier>();
return layout_normalization_pipeline.Run(hlo_module).status();
}
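// When xla_gpu_async_dot is enabled, wraps cuBLAS GEMMs and Triton-fused
// computations into async instructions so they can overlap with other work.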
absl::Status RunAsyncDotPasses(HloModule* hlo_module) {
HloPassPipeline pipeline("async-wrapper");
const DebugOptions& debug_options = hlo_module->config().debug_options();
if (debug_options.xla_gpu_async_dot()) {
pipeline.AddPass<AsyncWrapper>([](HloInstruction* instruction) {
if (IsCublasGemm(*instruction)) {
return true;
}
if (instruction->called_computations().size() == 1 &&
IsTritonFusedComputation(
*instruction->called_computations().front())) {
return true;
}
return false;
});
}
return pipeline.Run(hlo_module).status();
}
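// Rewrites address-computation patterns into dynamic-slice fusions when
// xla_gpu_enable_dynamic_slice_fusion is enabled.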
absl::Status RunDynamicSliceFusionPasses(HloModule* hlo_module,
se::Platform::Id platform_id) {
if (hlo_module->config()
.debug_options()
.xla_gpu_enable_dynamic_slice_fusion()) {
HloPassPipeline pipeline("dynamic-slice");
TF_ASSIGN_OR_RETURN(se::Platform * platform,
se::PlatformManager::PlatformWithId(platform_id));
pipeline.AddPass<DynamicSliceFusionRewriter>(platform->Name());
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
}
return absl::OkStatus();
}
}
absl::Status GpuCompiler::RunCollectiveScheduleLinearizerPasses(
HloModule* hlo_module, se::StreamExecutor* stream_exec) {
HloPassPipeline pipeline("collective-schedule-linearizer");
pipeline.AddPass<CollectivesScheduleLinearizer>(
[this, stream_exec](const HloModule* module) {
return RequiresCollectiveScheduleLinearizer(module, stream_exec);
});
return pipeline.Run(hlo_module).status();
}
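// Top-level HLO optimization driver: SPMD partitioning, general
// optimizations, collective optimizations, convolution canonicalization,
// layout assignment and normalization, post-layout-assignment passes,
// fusion, and the post-fusion pipelines, in that order.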
absl::Status GpuCompiler::OptimizeHloModule(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config) {
tsl::profiler::TraceMe traceme("GpuCompiler::OptimizeHloModule");
CheckNotScheduled(hlo_module);
LogDebugOptions(hlo_module);
MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(
hlo_module->config()
.debug_options()
.xla_gpu_force_compilation_parallelism(),
      options.thread_pool,
      /*default_parallelism=*/tsl::port::MaxParallelism());
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
LayoutInsensitiveAlgebraicSimplifierOptions(
hlo_module->config(), gpu_target_config,
GetAlgebraicSimplifierOptions(hlo_module->config()));
TF_RETURN_IF_ERROR(RunPreSPMDPartitionerPasses(hlo_module));
TF_RETURN_IF_ERROR(RunSPMDPasses(hlo_module, gpu_target_config,
layout_insensitive_algsimp_opts));
TF_RETURN_IF_ERROR(RunOptimizationPasses(hlo_module, gpu_target_config,
layout_insensitive_algsimp_opts));
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
TF_RETURN_IF_ERROR(RunCollectiveOptimizationPasses(
hlo_module, layout_insensitive_algsimp_opts, gpu_version));
se::dnn::VersionInfo dnn_version = gpu_target_config.dnn_version_info;
if (stream_exec != nullptr) {
gpu_version = GetGpuVersion(stream_exec);
TF_ASSIGN_OR_RETURN(dnn_version, GetDnnVersionInfo(stream_exec));
}
TF_RETURN_IF_ERROR(OptimizeHloConvolutionCanonicalization(
hlo_module, gpu_version, dnn_version, options.device_allocator,
gpu_target_config.device_description.runtime_version()));
TF_RETURN_IF_ERROR(
RunLayoutAssignmentPasses(hlo_module, gpu_version, dnn_version));
TF_RETURN_IF_ERROR(RunLayoutNormalizationPasses(hlo_module, gpu_version));
TF_RETURN_IF_ERROR(OptimizeHloPostLayoutAssignment(
hlo_module, stream_exec, options, gpu_target_config,
thread_pool.get_mutable()));
TF_RETURN_IF_ERROR(RunPostLayoutCollectivePipelinerPasses(hlo_module));
TF_RETURN_IF_ERROR(RunDynamicSliceFusionPasses(hlo_module, PlatformId()));
TF_RETURN_IF_ERROR(RunFusionPasses(hlo_module, gpu_target_config,
thread_pool.get_mutable(),
ShapeSizeBytesFunction()));
TF_RETURN_IF_ERROR(RunPostFusionPasses(
hlo_module,
[this](HloPassPipeline* pipeline, const DebugOptions& debug_options) {
return AddCustomKernelReplacementPasses(pipeline, debug_options);
}));
TF_RETURN_IF_ERROR(RunPostFusionCollectiveOptimizationPasses(hlo_module));
TF_RETURN_IF_ERROR(RunPostFusionSimplificationPasses(
hlo_module, layout_insensitive_algsimp_opts, gpu_version));
TF_RETURN_IF_ERROR(RunPostFusionVerificationPasses(
hlo_module, stream_exec, options, gpu_target_config));
TF_RETURN_IF_ERROR(
RunCollectiveScheduleLinearizerPasses(hlo_module, stream_exec));
TF_RETURN_IF_ERROR(RunAsyncDotPasses(hlo_module));
return absl::OkStatus();
}
AlgebraicSimplifierOptions GpuCompiler::GetAlgebraicSimplifierOptions(
const HloModuleConfig& config) {
AlgebraicSimplifierOptions opts;
opts.set_enable_dot_strength_reduction(
config.debug_options().xla_gpu_enable_dot_strength_reduction());
return opts;
}
absl::Status GpuCompiler::PrepareHloModuleForIrEmitting(HloModule* hlo_module) {
return PrepareHloModuleForIrEmittingPipeline(*hlo_module, GetCanShareBuffer())
.Run(hlo_module)
.status();
}
namespace {
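// Adds the GemmRewriter twice: first for FP8 GEMMs only, then for all other
// dtypes, so the FP8 rewriter sees unmodified dots. Bias fusion is disabled
// when dots are compiled asynchronously.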
void AddGemmRewriterPasses(HloPassPipeline& pipeline,
const DebugOptions& debug_options,
const se::GpuComputeCapability gpu_version,
const se::SemanticVersion& toolkit_version) {
GemmRewriterOptions::BiasMode bias_mode =
GemmRewriterOptions::BiasMode::kBias;
if (debug_options.xla_gpu_async_dot()) {
bias_mode = GemmRewriterOptions::BiasMode::kNoBias;
}
pipeline.AddPass<GemmRewriter>(
gpu_version, toolkit_version,
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only, bias_mode});
pipeline.AddPass<GemmRewriter>(
gpu_version, toolkit_version,
GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only, bias_mode});
}
}
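// Layout-sensitive passes that must run after layout assignment: HLO
// normalization, GEMM/Triton fusion rewriting and autotuning, float
// normalization for bf16 and FP8 types, host offloading, and (in NDEBUG
// builds) an end-of-pipeline layout-sensitive verifier.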
absl::Status GpuCompiler::OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) {
const DebugOptions& debug_options = hlo_module->config().debug_options();
const se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
const AlgebraicSimplifierOptions simplifier_options = [&] {
AlgebraicSimplifierOptions opts =
GetAlgebraicSimplifierOptions(hlo_module->config());
opts.set_supports_non_canonical_dots(false);
opts.set_is_layout_sensitive(true);
opts.set_enable_conv_operand_swap(false);
opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());
opts.set_enable_unconditional_reduce_of_concat_replacement(false);
return opts;
}();
TF_ASSIGN_OR_RETURN(AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, debug_options, options,
gpu_target_config));
const GpuFloatSupport bf16_support(gpu_version, BF16);
const GpuFloatSupport f8e5m2_support(gpu_version, F8E5M2, F16);
const GpuFloatSupport f8e4m3_support(gpu_version, F8E4M3, F16);
const GpuFloatSupport f8e4m3fn_support(gpu_version, F8E4M3FN, F16);
const FloatSupport f8e4m3b11fnuz_support(F8E4M3B11FNUZ, F16);
const GpuFloatSupport f8e5m2fnuz_support(gpu_version, F8E5M2FNUZ, F16);
const GpuFloatSupport f8e4m3fnuz_support(gpu_version, F8E4M3FNUZ, F16);
const GpuFloatSupport f8e3m4_support(gpu_version, F8E3M4, F16);
auto add_float_normalization = [&](HloPassPipeline& pipeline) {
auto& sub_pipeline =
pipeline.AddPass<HloPassPipeline>("float_normalization");
sub_pipeline.AddPass<FloatNormalization>(&bf16_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e5m2_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fn_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3b11fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e5m2fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fnuz_support);
sub_pipeline.AddPass<FloatNormalization>(&f8e3m4_support);
if (debug_options.xla_allow_excess_precision()) {
sub_pipeline.AddPass<SimplifyFPConversions>();
}
};
{
HloPassPipeline pipeline("hlo normalization");
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot,
TransposeFolding::NeverFoldTranspose);
pipeline.AddPass<ReshapeDecomposer>();
pipeline.AddPass<ReduceDecomposer>([&](const HloInstruction* r) {
return IsReductionFromOrToContiguousDimensions(*r);
});
if (debug_options.xla_gpu_enable_custom_fusions()) {
pipeline.AddPass<SimplifyFPConversions>();
pipeline.AddPass<CustomKernelFusionRewriter>(
&gpu_target_config.device_description);
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
}
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
pipeline.AddPass<AlgorithmChecker>(gpu_version);
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version);
const auto* rocm_cc = std::get_if<se::RocmComputeCapability>(&gpu_version);
if (debug_options.xla_gpu_enable_triton_gemm() &&
(cuda_cc != nullptr &&
cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE))) {
pipeline.AddPass<GemvRewriter>();
pipeline.AddPass<GemmFusion>(gpu_version);
} else if (cuda_cc != nullptr &&
cuda_cc->major == se::CudaComputeCapability::VOLTA) {
pipeline.AddPass<SimplifyFPConversions>();
pipeline.AddPass<CustomKernelFusionRewriter>(
&gpu_target_config.device_description);
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
}
AddGemmRewriterPasses(
pipeline, debug_options, gpu_version,
gpu_target_config.device_description.runtime_version());
pipeline.AddPass<GemmBroadcastFoldingRewriter>();
pipeline.AddPass<LayoutNormalization>(&NormalizeLayoutForGpuCustomCalls);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
pipeline.AddPass<ScatterSimplifier>();
pipeline.AddPass<BroadcastCanonicalizer>();
pipeline.AddPass<TransposeDimensionGrouper>();
pipeline.AddPass<ReductionDegenerateDimRemover>();
pipeline.AddPass<ReductionLayoutNormalizer>();
if (debug_options
.xla_gpu_experimental_enable_triton_softmax_priority_fusion() &&
((cuda_cc != nullptr &&
cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE)) ||
rocm_cc != nullptr)) {
add_float_normalization(pipeline);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
      pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
pipeline.AddPass<HloConstantFolding>();
pipeline.AddPass<HloDCE>();
      pipeline.AddPass<SoftmaxRewriterTriton>(
          gpu_target_config.device_description, ShapeSizeBytesFunction(),
          /*only_fuse_if_profitable=*/true);
}
pipeline.AddPass<ReductionDimensionGrouper>();
bool ignore_small_reduce_dims =
!debug_options.xla_gpu_enable_priority_fusion();
pipeline.AddPass<HloPassFix<ReductionSplitter>>(ignore_small_reduce_dims);
pipeline.AddPass<HloPassFix<TreeReductionRewriter>>(gpu_version);
pipeline.AddPass<SubByteNormalization>(
SubByteNormalization::SET_ELEMENT_SIZE);
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
}
HloPassPipeline pipeline("post-layout_assignment");
AddHloVerifier(&pipeline, !debug_options.xla_experimental_ignore_channel_id(),
HloVerifierOpts{}
.MakeLayoutSensitive()
.WithInstructionCanChangeLayout(
LayoutAssignment::InstructionCanChangeLayout)
.VerifyBroadcastDimensionsOrder()
.VerifyReshapeIsBitcast(),
                 /*debug_only=*/true);
add_float_normalization(pipeline);
TF_RETURN_IF_ERROR(AddGemmFusionAutotuningPasses(
&pipeline, hlo_module, autotune_config, thread_pool,
options.key_value_store,
gpu_target_config.device_description.runtime_version()));
pipeline.AddPass<CallInliner>();
AddGemmRewriterPasses(pipeline, debug_options, gpu_version,
gpu_target_config.device_description.runtime_version());
pipeline.AddPass<GemmBroadcastFoldingRewriter>();
pipeline.AddPass<HostOffloader>(
static_cast<int64_t>(stream_executor::MemoryType::kHost));
TF_RETURN_IF_ERROR(
AddConvAndGemmAutotuningPasses(&pipeline, gpu_version, options,
hlo_module, autotune_config, thread_pool));
add_float_normalization(pipeline);
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
gpu_version);
if (debug_options.xla_allow_excess_precision()) {
pipeline.AddPass<SimplifyFPConversions>();
}
  pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
pipeline.AddPass<HostMemoryTransferAsyncifier>(
static_cast<int64_t>(stream_executor::MemoryType::kHost));
#ifdef NDEBUG
HloVerifierOpts opts = HloVerifierOpts{}
.MakeLayoutSensitive()
.WithInstructionCanChangeLayout(
LayoutAssignment::InstructionCanChangeLayout)
.VerifyBroadcastDimensionsOrder()
.VerifyReshapeIsBitcast();
opts.verify_unique_channel_ids =
!debug_options.xla_experimental_ignore_channel_id();
pipeline.AddPass<HloVerifier>(
std::make_unique<DefaultVerifierMetadata>(std::move(opts)),
"end-of-post-layout_assignment");
#endif
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
absl::StatusOr<Compiler::TargetConfig> GpuCompiler::GetTargetConfig(
const Compiler::CompileOptions& options, const DebugOptions& debug_opts,
se::StreamExecutor* executor) {
if (options.target_config.has_value()) {
return *options.target_config;
}
if (!debug_opts.xla_gpu_target_config_filename().empty()) {
std::string gpu_target_config_string;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
tsl::Env::Default(), debug_opts.xla_gpu_target_config_filename(),
&gpu_target_config_string));
stream_executor::GpuTargetConfigProto gpu_target_config_proto;
if (!tsl::protobuf::TextFormat::ParseFromString(gpu_target_config_string,
&gpu_target_config_proto)) {
return absl::FailedPreconditionError(
"Failed to parse GpuTargetConfigProto");
}
return Compiler::TargetConfig{gpu_target_config_proto};
}
if (executor) {
Compiler::TargetConfig target_config = Compiler::TargetConfig{executor};
int64_t device_memory_size =
target_config.device_description.device_memory_size();
if (device_memory_size == -1) {
return absl::FailedPreconditionError(
"When running on an NVIDIA simulation device, you must use "
"--xla_gpu_target_config_filename to pass in target information. "
"The target config from StreamExecutor is inaccurate.");
}
return target_config;
}
return absl::InternalError(
"Either GPU has to be attached, or --xla_gpu_target_config_filename "
"has to be specified to specify the target to compile for.");
}
absl::StatusOr<std::unique_ptr<HloModule>> GpuCompiler::RunHloPasses(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) {
const DebugOptions debug_opts = module->config().debug_options();
TF_RETURN_IF_ERROR(LoadAutotuneResultsFromFile(debug_opts));
bool is_deviceless = options.target_config.has_value() ||
!debug_opts.xla_gpu_target_config_filename().empty();
TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
GetTargetConfig(options, debug_opts, stream_exec));
const std::optional<std::string> unoptimized_fingerprint =
MaybeUploadUnoptimizedGpuSymbols(module.get(),
gpu_target_config.ToProto());
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat("GpuCompiler::RunHloPasses for ", module->name()),
!options.is_autotuning_compilation);
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
tsl::profiler::TraceMe activity(
[&] { return absl::StrCat("HLO Transforms:", module->name()); },
tsl::profiler::TraceMeLevel::kInfo);
TF_RETURN_IF_ERROR(OptimizeHloModule(module.get(),
is_deviceless ? nullptr : stream_exec,
options, gpu_target_config));
TF_RETURN_IF_ERROR(PrepareHloModuleForIrEmitting(module.get()));
if (module->config()
.debug_options()
.xla_gpu_experimental_enable_fusion_block_level_rewriter()) {
HloPassPipeline pipeline("fusion-block-level-rewriter-pipeline");
pipeline.AddPass<FusionBlockLevelRewriter>(
gpu_target_config.device_description, ShapeSizeBytesFunction());
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
}
uint64_t end_usecs = tsl::Env::Default()->NowMicros();
RecordHloPassesDuration(end_usecs - start_usecs);
DumpHloModuleMetadataIfEnabled({module.get()});
AutotuneResults autotune_results;
TF_ASSIGN_OR_RETURN(
AutotuneConfig autotune_config,
GetAutotuneConfig(stream_exec, debug_opts, options, gpu_target_config));
if (!is_deviceless) {
TF_RETURN_IF_ERROR(
AutotunerUtil::SerializeAutotuneResults(&autotune_results));
TF_RETURN_IF_ERROR(SerializeAutotuneResultsToFile(debug_opts));
}
const std::optional<std::string> optimized_fingerprint =
MaybeUploadOptimizedGpuSymbols(module.get(), autotune_results);
if (unoptimized_fingerprint.has_value() &&
optimized_fingerprint.has_value()) {
MaybeUploadGpuSymbolMapping(*unoptimized_fingerprint,
*optimized_fingerprint);
}
if (DumpingEnabledForHloModule(*module)) {
    TF_ASSIGN_OR_RETURN(
        std::string autotune_results,
        AutotunerUtil::SerializeAutotuneResults(/*as_textproto=*/true));
DumpToFileInDirOrStdout(*module, "", "autotune_results.pbtxt",
autotune_results);
}
return std::move(module);
}
namespace {
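// Copy insertion after scheduling: removes unnecessary copies, then clears
// the schedule while special-case copies are added so new instructions can
// be placed, and finally restores the updated schedule.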
absl::Status RunPostSchedulingCopyInsertion(
HloModule* module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
constexpr int64_t kRegionBasedLiveRangeAnalysisLimit = -1;
const int64_t kUseRegionBasedLiveRangeAnalysis =
module->config()
.debug_options()
.xla_gpu_copy_insertion_use_region_analysis()
? kRegionBasedLiveRangeAnalysisLimit
: 0;
CopyInsertion copy_insertion(can_share_buffer,
kUseRegionBasedLiveRangeAnalysis);
TF_RETURN_IF_ERROR(copy_insertion.RemoveUnnecessaryCopies(module));
HloSchedule saved_schedule = module->schedule();
module->clear_schedule();
TF_RETURN_IF_ERROR(
copy_insertion.CopyInsertion::AddSpecialCaseCopies(module));
TF_RETURN_IF_ERROR(HloDCE().Run(module).status());
TF_RETURN_IF_ERROR(saved_schedule.Update());
TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));
return absl::OkStatus();
}
}
using OutputInfoMap =
absl::flat_hash_map<ShapeIndex, GpuExecutable::OutputInfo>;
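// Diagnostic handler that swallows LLVM diagnostics, routing their text to
// VLOG(5) instead of failing compilation or printing to stderr.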
static void NullDiagnosticHandler(const llvm::DiagnosticInfo* diag_info,
void* context) {
std::string error_string;
llvm::raw_string_ostream string_printer(error_string);
llvm::DiagnosticPrinterRawOStream diagnostic_printer(string_printer);
diag_info->print(diagnostic_printer);
VLOG(5) << error_string;
}
namespace {
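// Clones an llvm::Module into a fresh LLVMContext by round-tripping it
// through bitcode, so each compilation thread can own its own context.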
std::unique_ptr<llvm::Module> CopyToContext(const llvm::Module& module,
llvm::LLVMContext& context) {
llvm::SmallString<0> bitcode;
llvm::raw_svector_ostream bitcode_ostream(bitcode);
llvm::WriteBitcodeToFile(module, bitcode_ostream);
llvm::Expected<std::unique_ptr<llvm::Module>> new_module =
llvm::parseBitcodeFile(
llvm::MemoryBufferRef(llvm::StringRef(bitcode.data(), bitcode.size()),
"split_module"),
context);
CHECK(new_module) << "Failed to parse bitcode "
<< llvm::toString(new_module.takeError());
return std::move(new_module.get());
}
}
absl::StatusOr<GpuCompiler::BackendCompileResult>
GpuCompiler::CompileSingleModule(const HloModuleConfig& module_config,
se::GpuComputeCapability gpu_version,
const HloModule* debug_module,
llvm::Module* llvm_module, bool relocatable,
const CompileOptions& options,
std::optional<int> shard_number) {
{
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat(
"GpuCompiler::RunBackend - Running LLVM verifier for ",
(debug_module != nullptr ? debug_module->name() : "(unknown)")),
VLOG_IS_ON(4) && !options.is_autotuning_compilation);
llvm_module->getContext().setDiagnosticHandlerCallBack(
NullDiagnosticHandler, nullptr);
std::string err;
llvm::raw_string_ostream err_stream(err);
TF_RET_CHECK(!llvm::verifyModule(*llvm_module, &err_stream))
<< "Invalid LLVM IR before optimizations:\n"
<< err_stream.str()
<< "\nThis probably indicates a bug in the HLO -> LLVM IR "
"lowering. Rerun with --xla_dump_to to get the IR"
<< (debug_module
? absl::StrCat(" and looks for files with name containing: *",
FilenameFor(*debug_module, "", ""), "*")
: ".");
}
TF_ASSIGN_OR_RETURN(
BackendCompileResult result,
CompileTargetBinary(module_config, llvm_module, gpu_version, relocatable,
debug_module, options));
const bool should_dump = DumpingEnabledForHloModule(
debug_module ? debug_module->name() : "", module_config.debug_options());
if (should_dump) {
if (debug_module) {
llvm_ir::DumpIrIfEnabled(
          *debug_module, *llvm_module,
          /*optimized=*/true,
          shard_number.has_value() ? std::to_string(*shard_number) : "");
} else {
LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
"inferred. Please implement (potentially MLIR) module -> "
"filename heuristic.";
}
}
if (user_post_optimization_hook_) {
user_post_optimization_hook_(*llvm_module);
}
if (should_dump) {
absl::string_view ptx = result.asm_text;
if (debug_module) {
DumpToFileInDirOrStdout(*debug_module, "",
shard_number.has_value()
? (std::to_string(*shard_number) + ".ptx")
: "ptx",
ptx);
} else {
LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
"inferred. Please implement (potentially MLIR) module -> "
"filename heuristic.";
}
}
return result;
}
namespace {
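// Helpers for module splitting: CountFunctions returns the number of
// externally visible function definitions (used to pick the shard count);
// SingleFunctionName returns the name of the sole such function, or "" if
// the module defines more than one.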
int CountFunctions(const llvm::Module& module) {
int num_functions = 0;
for (const llvm::Function& func : module.functions()) {
if (!func.isDeclaration() &&
func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
++num_functions;
}
}
return num_functions;
}
std::string SingleFunctionName(const llvm::Module& module) {
std::string name;
for (const llvm::Function& func : module.functions()) {
if (!func.isDeclaration() &&
func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
if (name.empty()) {
name = func.getName().str();
} else {
return "";
}
}
}
return name;
}
}
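// Splits the LLVM module into shards, compiles each shard (in parallel when
// a thread pool is available), reuses cached kernel binaries when a kernel
// cache file is configured, and links the resulting binaries into one.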
absl::StatusOr<GpuCompiler::BackendCompileResult> GpuCompiler::CompileAndLink(
const HloModuleConfig& module_config,
CompileModuleResults& compile_module_results,
se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,
const CompileOptions& options, const HloModule* debug_module) {
llvm::Module* llvm_module = &*compile_module_results.llvm_module;
bool force_module_split =
module_config.debug_options().xla_llvm_force_inline_before_split();
if (force_module_split) {
for (llvm::Function& func : llvm_module->functions()) {
if (func.getNumUses() > 0 && !func.isDeclaration()) {
VLOG(4) << absl::StrFormat("Inlining function %s with %d users.\n",
func.getName().str(), func.getNumUses());
std::vector<llvm::CallInst*> calls_to_inline;
for (auto* user : func.users()) {
if (auto* call = llvm::dyn_cast<llvm::CallInst>(user)) {
calls_to_inline.push_back(call);
}
}
for (auto* call_to_inline : calls_to_inline) {
llvm::InlineFunctionInfo inline_function_info;
if (!llvm::InlineFunction(*call_to_inline, inline_function_info)
.isSuccess()) {
return absl::InternalError("Can not inline function " +
func.getName().str());
};
}
}
}
}
llvm::DenseMap<llvm::StringRef, llvm::Constant*> const_initializer_map;
llvm::Module& module_with_constants =
(compile_module_results.llvm_module_constants == nullptr)
? *llvm_module
: *compile_module_results.llvm_module_constants;
for (llvm::GlobalVariable& gv : module_with_constants.globals()) {
if (gv.hasName() && gv.isConstant() && gv.hasInitializer() &&
gv.hasExternalLinkage()) {
llvm::Constant* initializer = gv.getInitializer();
unsigned int num_elements = 0;
if (auto* caz =
llvm::dyn_cast<llvm::ConstantAggregateZero>(initializer)) {
num_elements = caz->getElementCount().getFixedValue();
} else if (auto* cds = llvm::dyn_cast<llvm::ConstantDataSequential>(
initializer)) {
num_elements = cds->getNumElements();
}
if (num_elements > 0) {
const_initializer_map[gv.getName()] = initializer;
}
}
}
  llvm_ir::DumpIrIfEnabled(*debug_module, *llvm_module,
                           /*optimized=*/false, "inlined");
absl::string_view cache_path =
module_config.debug_options().xla_gpu_kernel_cache_file();
const bool use_cache = !cache_path.empty();
struct NamedModule {
std::string name;
std::unique_ptr<llvm::Module> module;
};
std::vector<NamedModule> llvm_modules;
MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(
module_config.debug_options()
.xla_gpu_force_compilation_parallelism(),
      options.thread_pool,
      /*default_parallelism=*/1);
int num_modules = CountFunctions(*llvm_module);
if (thread_pool.get() != nullptr && !use_cache) {
num_modules = std::max(1, std::min(thread_pool->NumThreads(), num_modules));
}
if (compile_module_results.llvm_module_constants != nullptr) {
llvm_modules.reserve(num_modules + 1);
llvm_modules.push_back(
{"", std::move(compile_module_results.llvm_module_constants)});
} else {
llvm_modules.reserve(num_modules);
}
int single_function_module_count = 0;
llvm::SplitModule(
*llvm_module, num_modules,
[&](std::unique_ptr<llvm::Module> module) {
for (llvm::GlobalVariable& gv : module->globals()) {
if (gv.hasName() && gv.isConstant() && !gv.hasInitializer() &&
const_initializer_map.count(gv.getName()) != 0) {
gv.setInitializer(const_initializer_map[gv.getName()]);
gv.setLinkage(llvm::GlobalValue::InternalLinkage);
}
}
const std::string name = SingleFunctionName(*module);
if (!name.empty()) {
++single_function_module_count;
}
llvm_modules.push_back({name, std::move(module)});
},
      /*PreserveLocals=*/true, /*RoundRobin=*/true);
VLOG(2) << "Single-function cacheable modules: "
<< single_function_module_count << " / " << llvm_modules.size();
struct NamedCompileResult {
std::string name;
absl::StatusOr<BackendCompileResult> result;
};
std::vector<NamedCompileResult> compile_results(llvm_modules.size());
if (thread_pool.get() != nullptr) {
tsl::BlockingCounter counter(llvm_modules.size());
for (int i = 0; i < llvm_modules.size(); ++i) {
thread_pool.get_mutable()->Schedule(
[&compile_results, i, &llvm_modules, &counter, this, &module_config,
&gpu_version, &debug_module, &options] {
llvm::LLVMContext new_context;
std::unique_ptr<llvm::Module> new_module =
CopyToContext(*llvm_modules.at(i).module, new_context);
compile_results.at(i) = {
llvm_modules.at(i).name,
                  CompileSingleModule(module_config, gpu_version, debug_module,
                                      new_module.get(),
                                      /*relocatable=*/true, options,
                                      /*shard_number=*/i)};
counter.DecrementCount();
});
}
counter.Wait();
} else {
for (int i = 0; i < llvm_modules.size(); ++i) {
compile_results.at(i) = {
llvm_modules.at(i).name,
          CompileSingleModule(module_config, gpu_version, debug_module,
                              &*llvm_modules.at(i).module,
                              /*relocatable=*/true, options,
                              /*shard_number=*/i)};
}
}
std::string ptx_snippets;
std::vector<std::vector<uint8_t>> binaries_to_link;
binaries_to_link.reserve(compile_results.size());
std::vector<KernelReuseCache::NamedBinary> binaries_to_cache;
binaries_to_cache.reserve(single_function_module_count);
for (const auto& [name, maybe_result] : compile_results) {
TF_ASSIGN_OR_RETURN(auto result, maybe_result);
if (result.binary.empty()) {
continue;
}
ptx_snippets += result.asm_text;
ptx_snippets += "\n";
binaries_to_link.push_back(result.binary);
if (!name.empty()) {
binaries_to_cache.push_back({name, result.binary});
}
}
if (use_cache) {
std::string resolved_path;
    if (!tsl::io::ResolveTestPrefixes(cache_path, resolved_path)) {
      return FailedPrecondition("File path cannot be resolved: %s",
                                cache_path);
}
const CompilationCacheProto& current_cache =
compile_module_results.kernel_compilation_cache;
const bool cache_file_exists =
tsl::Env::Default()->FileExists(resolved_path).ok();
if (cache_file_exists) {
int loaded_kernel_count = 0;
for (const auto& [name, entry] : current_cache.entries()) {
if (llvm_module->getFunction(name) != nullptr) {
VLOG(5) << "Using the just compiled kernel for " << name;
TF_RET_CHECK(entry.binary().empty())
<< name
<< " is a just compiled kernel and is not expected to have a "
"binary yet.";
continue;
}
const uint8_t* binary =
reinterpret_cast<const uint8_t*>(entry.binary().data());
binaries_to_link.push_back(
std::vector<uint8_t>(binary, binary + entry.binary().size()));
VLOG(5) << "Using " << name << " from cache: " << entry.binary().size();
++loaded_kernel_count;
}
VLOG(2) << "Using " << loaded_kernel_count << " / "
<< current_cache.entries_size() << " cached kernels.";
}
if (!binaries_to_cache.empty()) {
TF_RETURN_IF_ERROR(
UpdateDiskKernelCache(resolved_path, cache_file_exists,
current_cache, binaries_to_cache));
}
}
auto maybe_backend_result =
LinkModules(gpu_version, stream_exec, std::move(binaries_to_link),
module_config.debug_options());
if (!maybe_backend_result.ok()) {
LOG(ERROR) << "The CUDA linking API did not work. Please use XLA_FLAGS="
"--xla_gpu_enable_llvm_module_compilation_parallelism=false "
"to bypass it, but expect to get longer compilation time due "
"to the lack of multi-threading. Original error: "
<< maybe_backend_result.status();
return maybe_backend_result.status();
}
VLOG(4) << "Binary size after linking [B]: " << maybe_backend_result->size();
compile_module_results.kernel_compilation_cache.Clear();
return BackendCompileResult{ptx_snippets, std::move(*maybe_backend_result)};
}
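// Schedules the module, lowers it to LLVM IR, and compiles the IR to a
// device binary; uses CompileAndLink when parallel LLVM compilation is
// enabled and linking is available, CompileSingleModule otherwise.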
absl::StatusOr<GpuCompiler::CompileResultWithMetadata>
GpuCompiler::CompileToBackendResult(
HloModule* module, llvm::LLVMContext* llvm_context,
se::StreamExecutor* executor, const CompileOptions& options,
const se::DeviceDescription& gpu_device_info) {
tsl::profiler::TraceMe traceme("GpuCompiler::CompileToBackendResult");
TF_RETURN_IF_ERROR(RunPreSchedulingPasses(module, executor));
TF_ASSIGN_OR_RETURN(
ScheduleMetadata schedule_metadata,
ScheduleGpuModule(module, pointer_size_, gpu_device_info));
TF_RETURN_IF_ERROR(RunPostSchedulingPipelines(
module, schedule_metadata.scheduler_mem_limit, gpu_device_info));
TF_ASSIGN_OR_RETURN(se::Platform * platform,
se::PlatformManager::PlatformWithId(PlatformId()));
bool can_use_link_modules = (executor != nullptr);
if (can_use_link_modules) {
TF_ASSIGN_OR_RETURN(can_use_link_modules,
CanUseLinkModules(module->config()));
}
const bool split_modules =
can_use_link_modules &&
module->config()
.debug_options()
.xla_gpu_enable_llvm_module_compilation_parallelism();
const bool use_cache =
split_modules &&
!module->config().debug_options().xla_gpu_kernel_cache_file().empty();
TF_ASSIGN_OR_RETURN(
CompileModuleResults compile_module_results,
CompileModuleToLlvmIr(module, llvm_context, target_triple_, data_layout_,
platform->Name(), platform->id(), gpu_device_info,
GetCanShareBuffer(), BufferSizeBytesFunction(),
use_cache));
if (user_pre_optimization_hook_) {
user_pre_optimization_hook_(*compile_module_results.llvm_module);
if (compile_module_results.llvm_module_constants != nullptr) {
user_pre_optimization_hook_(
*compile_module_results.llvm_module_constants);
}
}
  llvm_ir::DumpIrIfEnabled(*module, *compile_module_results.llvm_module,
                           /*optimized=*/false);
  if (compile_module_results.llvm_module_constants != nullptr) {
    llvm_ir::DumpIrIfEnabled(*module,
                             *compile_module_results.llvm_module_constants,
                             /*optimized=*/false, "constants");
}
BackendCompileResult backend_result;
if (split_modules) {
TF_ASSIGN_OR_RETURN(backend_result,
CompileAndLink(module->config(), compile_module_results,
gpu_device_info.gpu_compute_capability(),
executor, options, module));
} else {
CHECK(compile_module_results.llvm_module_constants == nullptr);
TF_ASSIGN_OR_RETURN(
backend_result,
        CompileSingleModule(module->config(),
                            gpu_device_info.gpu_compute_capability(), module,
                            &*compile_module_results.llvm_module,
                            /*relocatable=*/false, options,
                            /*shard_number=*/std::nullopt));
}
RecordXlaDeviceBinarySize(backend_result.binary.size());
if (DumpingEnabledForHloModule(*module)) {
DumpToFileInDirOrStdout(
*module, "", "thunk_sequence.txt",
compile_module_results.executable->ToString(0));
}
return CompileResultWithMetadata{std::move(backend_result),
std::move(compile_module_results)};
}
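// Backend entry point: compiles an optimized HLO module into a
// GpuExecutable, optionally embedding the pre-optimization LLVM IR and the
// buffer assignment in the executable for debugging.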
absl::StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) {
tsl::profiler::ScopedAnnotation backend_annotation{[&] {
return absl::StrFormat("XlaCompileBackend:#module=%s,program_id=%d#",
module->name(), module->unique_id());
}};
BinaryMap dnn_compiled_graphs;
if (stream_exec) {
TF_RETURN_IF_ERROR(RunCudnnCompilerPasses(module.get(), stream_exec,
&dnn_compiled_graphs));
}
const DebugOptions& debug_opts = module->config().debug_options();
TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
GetTargetConfig(options, debug_opts, stream_exec));
if (DumpingEnabledForHloModule(*module)) {
std::string textproto;
tsl::protobuf::TextFormat::PrintToString(gpu_target_config.ToProto(),
&textproto);
DumpToFileInDirOrStdout(*module, "", "gpu_target_config.pbtxt", textproto);
}
if (!options.is_autotuning_compilation) {
VLOG(1) << "Starting to compile HLO module " << module->name();
}
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat("GpuCompiler::RunBackend for ", module->name()),
!options.is_autotuning_compilation);
std::string slow_compilation_msg =
absl::StrCat("Compiling module ", module->name());
auto slow_compile_alarm = SlowCompilationAlarm(slow_compilation_msg);
if (options.is_autotuning_compilation) {
if (module->config().debug_options().xla_embed_ir_in_executable()) {
LOG(WARNING) << "Doing autotuning compilations with "
"xla_embed_ir_in_executable wastes memory!";
}
}
llvm::LLVMContext llvm_context;
const se::DeviceDescription& gpu_device_info =
gpu_target_config.device_description;
if (module->config().hlo_profiling_enabled() || VLOG_IS_ON(1)) {
HloCostAnalysis::Options cost_analysis_options{ShapeSizeBytesFunction()};
cost_analysis_options.set_bytes_per_second(
gpu_device_info.memory_bandwidth());
GpuHloCostAnalysis cost_analysis(cost_analysis_options, gpu_device_info);
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&cost_analysis));
if (!options.is_autotuning_compilation) {
VLOG(1) << "HLO memory read+written: "
<< tsl::strings::HumanReadableNumBytes(
cost_analysis.bytes_accessed());
}
if (module->config().hlo_profiling_enabled()) {
LOG(ERROR) << "--xla_hlo_profile for GPU is unsupported.";
}
}
TF_ASSIGN_OR_RETURN(
CompileResultWithMetadata res,
CompileToBackendResult(module.get(), &llvm_context, stream_exec, options,
gpu_device_info));
if (DumpingEnabledForHloModule(*module)) {
DumpToFileInDirOrStdout(
*module, "", "thunk_sequence.txt",
res.compile_module_results.executable->ToString(0));
}
bool embed_ir_in_executable =
module->config().debug_options().xla_embed_ir_in_executable();
int64_t debug_buffer_assignment_show_max =
module->config().debug_options().xla_debug_buffer_assignment_show_max();
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaCreateGpuExecutable:#module=%s#",
module->name());
});
TF_ASSIGN_OR_RETURN(
auto gpu_executable,
GpuExecutable::Create(GpuExecutable::Params{
(options.is_autotuning_compilation &&
!res.backend_result.binary.empty())
? std::string()
: std::move(res.backend_result.asm_text),
std::move(res.backend_result.binary),
std::move(dnn_compiled_graphs),
gpu_device_info.gpu_compute_capability(),
std::move(res.compile_module_results.executable),
std::move(res.compile_module_results.constants),
std::move(res.compile_module_results.output_info),
std::move(res.compile_module_results.module_name),
std::move(res.compile_module_results.output_shape),
(res.compile_module_results.use_original_allocations
? std::optional<std::vector<BufferAllocation>>()
: std::move(res.compile_module_results.allocations)),
std::move(res.compile_module_results.buffer_assignment),
debug_buffer_assignment_show_max,
options.is_autotuning_compilation
? std::unique_ptr<HloModule>()
: std::move(module),
!options.is_autotuning_compilation}));
if (embed_ir_in_executable) {
std::string ir_module_string_before_opt =
llvm_ir::DumpToString(res.compile_module_results.llvm_module.get());
gpu_executable->set_ir_module_string(ir_module_string_before_opt);
DCHECK_NE("", ir_module_string_before_opt);
}
IncrementCompiledProgramsCount();
if (!options.is_autotuning_compilation && gpu_executable->has_module()) {
auto hlo_proto = std::make_unique<HloProto>();
*hlo_proto->mutable_buffer_assignment() =
gpu_executable->buffer_assignment()->ToProto();
gpu_executable->set_hlo_proto(std::move(hlo_proto));
gpu_executable->set_debug_info(
gpu_executable->buffer_assignment()->GetStats().ToString());
}
return static_cast<std::unique_ptr<Executable>>(std::move(gpu_executable));
}
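// Ahead-of-time compilation: runs HLO passes on any unscheduled modules,
// then compiles each module into a serializable
// GpuThunkAotCompilationResult.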
absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
GpuCompiler::CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
const AotCompilationOptions& options) {
CHECK_EQ(options.PlatformId(), PlatformId());
std::vector<std::unique_ptr<HloModule>> modules =
module_group->ConsumeModules();
std::vector<std::unique_ptr<HloModule>> optimized_modules;
optimized_modules.reserve(modules.size());
for (std::unique_ptr<HloModule>& module : modules) {
if (!module->has_schedule()) {
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
module->name(), module->unique_id());
}};
CompileOptions compile_options;
compile_options.device_allocator = options.device_allocator();
compile_options.target_config = options.target_config();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> optimized_module,
RunHloPasses(std::move(module), options.executor(), compile_options));
optimized_modules.push_back(std::move(optimized_module));
} else {
optimized_modules.push_back(std::move(module));
}
}
modules = std::move(optimized_modules);
std::vector<std::unique_ptr<AotCompilationResult>> results;
const std::optional<Compiler::TargetConfig>& target_config =
options.target_config();
CHECK(target_config.has_value() || options.executor() != nullptr);
const se::DeviceDescription& gpu_device_info =
target_config.has_value() ? target_config->device_description
: options.executor()->GetDeviceDescription();
for (const std::unique_ptr<HloModule>& module : modules) {
llvm::LLVMContext llvm_context;
TF_ASSIGN_OR_RETURN(
CompileResultWithMetadata res,
CompileToBackendResult(module.get(), &llvm_context, options.executor(),
{options.device_allocator()}, gpu_device_info));
TF_ASSIGN_OR_RETURN(
results.emplace_back(),
GpuThunkAotCompilationResult::FromModule(
module.get(), res.compile_module_results.buffer_assignment.get(),
res.backend_result.asm_text, res.backend_result.binary,
res.backend_result.dnn_compiled_graphs));
}
return std::move(results);
}
HloCostAnalysis::ShapeSizeFunction GpuCompiler::ShapeSizeBytesFunction() const {
return [pointer_size = pointer_size_](const Shape& shape) {
return GetSizeOfShape(shape, pointer_size);
};
}
absl::StatusOr<std::unique_ptr<AotCompilationResult>> GpuCompiler::Export(
Executable* executable) const {
auto* gpu_executable = tensorflow::down_cast<GpuExecutable*>(executable);
if (!gpu_executable) return Internal("GpuExecutable is null");
return GpuThunkAotCompilationResult::FromModule(
&gpu_executable->module(), gpu_executable->buffer_assignment(),
gpu_executable->text(), gpu_executable->binary(),
gpu_executable->dnn_compiled_graphs());
}
absl::Status GpuCompiler::RunPreSchedulingPasses(
HloModule* module, se::StreamExecutor* stream_exec) {
HloPassPipeline pipeline("pre-scheduling-passes");
pipeline.AddPass<FusionWrapper>();
return pipeline.Run(module).status();
}
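// Builds HloCostAnalysis options; when host memory offloading is enabled,
// the FLOPs and transcendentals rates are derived from the device's core
// count, FPUs per core, and clock rate.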
HloCostAnalysis::Options CreateHloAnalysisOpts(
const HloModule& module, const se::DeviceDescription& gpu_device_info,
ShapeSizeFn shape_size_fn) {
HloCostAnalysis::Options hlo_cost_analysis_options;
hlo_cost_analysis_options.shape_size = shape_size_fn;
std::optional<HloRematerialization::HostMemoryOffloadConfig>
offloading_config = std::nullopt;
if (module.config().debug_options().xla_gpu_enable_host_memory_offloading()) {
constexpr float kGiga = 1e+9;
constexpr float kFma = 2;
float flops_per_sec = gpu_device_info.core_count() *
gpu_device_info.fpus_per_core() *
gpu_device_info.clock_rate_ghz() * kGiga * kFma;
int64_t host_memory_space_color =
static_cast<int64_t>(se::MemoryType::kHost);
hlo_cost_analysis_options.set_flops_per_second(flops_per_sec);
hlo_cost_analysis_options.set_transcendentals_per_second(flops_per_sec);
    offloading_config =
        std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
            host_memory_space_color,
            /*bandwidth_to_host_bytes_per_second=*/
            gpu_device_info.memory_bandwidth(),
            /*bandwidth_from_host_bytes_per_second=*/
            gpu_device_info.memory_bandwidth());
}
return hlo_cost_analysis_options;
}
HloRematerialization::Options CreateRematOpts(
const HloModule& module, const se::DeviceDescription& gpu_device_info,
HloCostAnalysis& hlo_cost_analysis, int64_t scheduler_mem_limit) {
bool enable_offloading =
module.config().debug_options().xla_gpu_enable_host_memory_offloading();
std::optional<HloRematerialization::HostMemoryOffloadConfig>
offloading_config = std::nullopt;
if (enable_offloading) {
int64_t host_memory_space_color =
static_cast<int64_t>(se::MemoryType::kHost);
    offloading_config =
        std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
            host_memory_space_color,
            /*bandwidth_to_host_bytes_per_second=*/
            gpu_device_info.memory_bandwidth(),
            /*bandwidth_from_host_bytes_per_second=*/
            gpu_device_info.memory_bandwidth());
}
  HloRematerialization::RematerializationModeConfig
      rematerialization_mode_config(/*recompute=*/true, /*compress=*/true,
                                    /*host_offload=*/enable_offloading);
  HloRematerialization::Options options(
      hlo_cost_analysis, rematerialization_mode_config,
      /*memory_limit_bytes=*/scheduler_mem_limit,
      /*block_size_limit=*/1, /*block_rematerialization_factor=*/1,
      /*min_remat_size=*/0, /*compact_shape_function=*/nullptr,
      offloading_config);
return options;
}
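// Post-scheduling pipelines: copy insertion, conversion of non-overlapping
// async collectives back to sync, rematerialization under the scheduler's
// memory limit, fusion wrapping, and command-buffer scheduling.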
absl::Status GpuCompiler::RunPostSchedulingPipelines(
HloModule* module, int64_t scheduler_mem_limit,
const se::DeviceDescription& gpu_device_info) const {
TF_RETURN_IF_ERROR(
RunPostSchedulingCopyInsertion(module, GetCanShareBuffer()));
HloPassPipeline main_pipeline("post-scheduling-passes");
HloPredicate is_nop =
HloPredicateIsOp<HloOpcode::kParameter, HloOpcode::kConstant,
HloOpcode::kBitcast, HloOpcode::kGetTupleElement>;
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("async-to-sync-converter");
if (module->config()
.debug_options()
.xla_gpu_enable_pipelined_collectives() ||
module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
pipeline.AddPass<PipelinedP2PRewriter>();
}
pipeline.AddPass<GpuConvertAsyncCollectivesToSync>(is_nop);
}
HloRematerialization::RematerializationSizes sizes;
HloCostAnalysis::Options hlo_cost_analysis_opts =
CreateHloAnalysisOpts(*module, gpu_device_info, ShapeSizeBytesFunction());
HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_opts);
HloRematerialization::Options remat_opts = CreateRematOpts(
*module, gpu_device_info, hlo_cost_analysis, scheduler_mem_limit);
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("remat-pipeline");
pipeline.AddPass<HloRematerialization>(remat_opts, sizes);
pipeline.AddPass<StreamAttributeAnnotator>();
pipeline.AddPass<OptimizationBarrierExpander>();
}
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("fusion-wrapper");
pipeline.AddPass<FusionWrapper>();
}
{
HloPassPipeline& pipeline =
main_pipeline.AddPass<HloPassPipeline>("command-buffer-scheduling");
pipeline.AddPass<CommandBufferScheduling>(gpu_device_info);
pipeline.AddPass<SanitizeConstantNames>();
}
if (module->config().debug_options().xla_gpu_enable_pgle_accuracy_checker()) {
AddHloVerifier(
&main_pipeline,
module->config().debug_options().xla_experimental_ignore_channel_id(),
HloVerifierOpts{}.VerifyInstructionNameUnchanged());
}
return main_pipeline.Run(module).status();
}
absl::Status GpuCompiler::LoadAutotuneResultsFromFile(
const DebugOptions& debug_options) {
if (absl::string_view file_path =
debug_options.xla_gpu_load_autotune_results_from();
!file_path.empty()) {
static absl::once_flag once;
absl::Status status = absl::OkStatus();
absl::call_once(once, [&file_path, &status] {
status = AutotunerUtil::LoadAutotuneResultsFromFile(file_path);
});
TF_RETURN_IF_ERROR(status);
}
return absl::OkStatus();
}
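// Serializes the accumulated autotune results to the file named by
// xla_gpu_dump_autotune_results_to, if set.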
absl::Status GpuCompiler::SerializeAutotuneResultsToFile(
const DebugOptions& debug_options) {
if (absl::string_view file_path =
debug_options.xla_gpu_dump_autotune_results_to();
!file_path.empty()) {
TF_RETURN_IF_ERROR(
AutotunerUtil::SerializeAutotuneResultsToFile(file_path));
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResult(
const std::string& serialized_aot_result) {
return LoadAotCompilationResultStatic(serialized_aot_result);
}
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResultStatic(
const std::string& serialized_aot_result) {
return GpuThunkAotCompilationResult::FromString(serialized_aot_result);
}
}
} | #include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/autotune_results.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/xla_debug_info_manager.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
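// Fixture whose Schedule() helper runs the GPU scheduler and then the
// post-scheduling pipelines with a 4 MiB scheduler memory limit.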
class GpuCompilerTest : public HloTestBase {
public:
absl::Status Schedule(HloModule* module) {
auto compiler = backend().compiler();
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
    TF_RETURN_IF_ERROR(
        ScheduleGpuModule(module, /*pointer_size=*/4, gpu_device_info)
            .status());
    return tensorflow::down_cast<GpuCompiler*>(compiler)
        ->RunPostSchedulingPipelines(
            module, /*scheduler_mem_limit=*/4 * 1024 * 1024, gpu_device_info);
}
const stream_executor::GpuComputeCapability& GpuComputeComp() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
};
TEST_F(GpuCompilerTest, CompiledProgramsCount) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ResetCompiledProgramsCountForTesting();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
EXPECT_EQ(GetCompiledProgramsCount(), 1);
}
TEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/false})
.value();
EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(
executable->module().unique_id()));
}
TEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
int module_id = module->unique_id();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/true})
.value();
EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));
}
TEST_F(GpuCompilerTest, CopyInsertionFusion) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<HloModule> compiled_module =
backend()
.compiler()
->RunHloPasses(module->Clone(), backend().default_stream_executor(),
nullptr)
.value();
VLOG(2) << compiled_module->ToString();
size_t total_fusion_instrs = 0;
for (const HloInstruction* instr :
compiled_module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kFusion) {
++total_fusion_instrs;
}
}
EXPECT_EQ(total_fusion_instrs, 1);
const HloInstruction* entry_root =
compiled_module->entry_computation()->root_instruction();
EXPECT_THAT(
entry_root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()))));
}
TEST_F(GpuCompilerTest, CanRunScheduledModules) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_disable_all_hlo_passes(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m, is_scheduled=true
w {
p = s8[] parameter(0)
ROOT n = s8[] negate(p)
}
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] fusion(p), kind=kLoop, calls=w
})",
config));
  EXPECT_TRUE(Run(std::move(module), /*run_hlo_passes=*/true));
}
TEST_F(GpuCompilerTest, NonFusedInstructionsAreWrapped) {
HloModuleConfig config;
  DebugOptions debug_options = GetDebugOptionsForTest();
  config.set_debug_options(debug_options);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p = f32[2,4,4] parameter(0)
ROOT _ = f32[2,4,4]{2,1,0} transpose(p), dimensions={0,2,1}
})",
config));
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
HloModule& compiled_module = executable->module();
const HloInstruction* entry_root =
compiled_module.entry_computation()->root_instruction();
EXPECT_THAT(entry_root, GmockMatch(m::Fusion()));
}
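// Verifies that autotune results are dumped to and re-read from disk via the
// xla_gpu_dump_autotune_results_to / xla_gpu_load_autotune_results_from
// debug options.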
class PersistedAutotuningTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_dump_autotune_results_to(
xla_gpu_dump_autotune_results_to_);
options.set_xla_gpu_load_autotune_results_from(
xla_gpu_load_autotune_results_from_);
return options;
}
std::string xla_gpu_dump_autotune_results_to_;
std::string xla_gpu_load_autotune_results_from_;
};
TEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) {
constexpr absl::string_view kInvalidTextProto = "Invalid!";
xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_,
kInvalidTextProto));
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
}
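// Returns the number of kCopy instructions in the given computation (the
// overload below sums over all computations of a module).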
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
TEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) {
const absl::string_view hlo_string = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(hlo_string));
EXPECT_EQ(CountCopies(*module), 7);
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK(Schedule(module.get()));
EXPECT_EQ(CountCopies(*module), 4);
  // Refresh the cached root; scheduling may have rewritten the graph.
  root = module->entry_computation()->root_instruction();
while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuCompilerTest,
GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "Autotuning results have only been generated for Ampere "
<< "and Hopper GPUs";
}
const absl::string_view hlo_string = R"(
HloModule test
ENTRY main {
param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0)
param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1)
param_2 = s32[] parameter(2)
constant_0 = s32[] constant(0)
dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024}
reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0)
broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2}
add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0)
transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2}
slice_0 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]}
reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0)
copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1)
constant_1 = bf16[] constant(0.08838)
broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={}
multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1)
slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]}
reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1)
copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2)
ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3}
}
)";
HloModuleConfig config;
DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest();
triton_enabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
triton_enabled_debug_options
.set_xla_gpu_require_complete_aot_autotune_results(true);
config.set_debug_options(triton_enabled_debug_options);
config.set_replica_count(1);
config.set_num_partitions(1);
std::string path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"gpu_compiler_test_autotune_db.textproto");
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_enabled_module,
GetOptimizedModule(std::move(module)));
AutotunerUtil::ClearAutotuneResults();
DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest();
triton_disabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(triton_disabled_debug_options);
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_disabled_module,
GetOptimizedModule(std::move(module)));
const HloInstruction* root =
triton_enabled_module->entry_computation()->root_instruction();
const HloInstruction* custom_op = root->operand(0)->operand(0);
EXPECT_TRUE(custom_op->IsCustomCall("__cublas$gemm"));
EXPECT_EQ(triton_enabled_module->computation_count(),
triton_disabled_module->computation_count());
}
class FloatNormalizationTest : public GpuCompilerTest,
public ::testing::WithParamInterface<
std::pair<PrimitiveType, PrimitiveType>> {};
INSTANTIATE_TEST_SUITE_P(
Fp8s, FloatNormalizationTest,
::testing::Values(
std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN),
std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN),
std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2),
std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E5M2)));
TEST_P(FloatNormalizationTest, Fp8Normalization) {
const PrimitiveType lhs_type = GetParam().first;
const PrimitiveType rhs_type = GetParam().second;
const std::string lhs_name =
primitive_util::LowercasePrimitiveTypeName(lhs_type);
const std::string rhs_name =
primitive_util::LowercasePrimitiveTypeName(rhs_type);
const std::string module_str = absl::Substitute(R"(
HloModule sch
ENTRY main {
parameter = $0[1600,1600]{1,0} parameter(0)
parameter.1 = $1[1600,1600]{1,0} parameter(1)
neg = $1[1600,1600]{1,0} negate(parameter.1)
dot = f16[1600,1600]{1,0} dot(parameter,neg), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant = f16[] constant(0)
broadcast = f16[1600,1600]{1,0} broadcast(constant), dimensions={}
ROOT maximum = f16[1600,1600]{1,0} maximum(dot,broadcast)
})",
lhs_name, rhs_name);
auto optimize_module = [&](bool enable_triton, bool enable_blas,
bool enable_blas_fallback)
-> absl::StatusOr<std::unique_ptr<HloModule>> {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(enable_blas_fallback);
debug_options.set_xla_gpu_enable_triton_gemm(enable_triton);
if (!enable_blas) {
debug_options.add_xla_disable_hlo_passes("cublas-gemm-rewriter");
}
config.set_debug_options(debug_options);
config.set_num_partitions(1);
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, config));
return GetOptimizedModule(std::move(module));
};
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
const std::string triton_keep_types = absl::Substitute(
R"(CHECK: fusion($0{{[^)]*}}, $1{{[^)]*}}){{.*}}"kind":"__triton_gemm")",
lhs_name, rhs_name);
const std::string cublaslt_keep_types = absl::Substitute(
R"(CHECK: custom-call($0{{[^)]*}}, $1{{[^)]*}}){{.*}}custom_call_target="__cublas$$lt$$matmul$$f8")",
lhs_name, rhs_name);
const std::string cublas_convert_to_f16 =
R"(CHECK: custom-call(f16{{[^)]*}}, f16{{[^)]*}}){{.*}}custom_call_target="__cublas$gemm")";
const std::string fallback_convert_to_f16 =
R"(CHECK: dot(f16{{[^)]*}}, f16{{[^)]*}}))";
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_fallback,
                            optimize_module(/*enable_triton=*/true,
                                            /*enable_blas=*/true,
                                            /*enable_blas_fallback=*/false));
const std::string triton_expected_check =
(cc.IsAtLeastHopper() ||
(cc.IsAtLeastAmpere() && lhs_type == F8E5M2 && rhs_type == F8E5M2))
? triton_keep_types
: cublas_convert_to_f16;
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module_no_fallback->ToString(),
triton_expected_check));
EXPECT_TRUE(filecheck_matched);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_triton,
                            optimize_module(/*enable_triton=*/false,
                                            /*enable_blas=*/true,
                                            /*enable_blas_fallback=*/true));
const std::string blas_expected_check =
(cc.IsAtLeastHopper() && !(lhs_type == F8E5M2 && rhs_type == F8E5M2))
? cublaslt_keep_types
: cublas_convert_to_f16;
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(optimized_module_no_triton->ToString(),
blas_expected_check));
EXPECT_TRUE(filecheck_matched);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_nothing,
                            optimize_module(/*enable_triton=*/false,
                                            /*enable_blas=*/false,
                                            /*enable_blas_fallback=*/false));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(optimized_module_nothing->ToString(),
fallback_convert_to_f16));
EXPECT_TRUE(filecheck_matched);
}
}
TEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) {
const char* kModuleStr = R"(
HloModule cp
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
recv-data = f32[1, 1024, 1024] collective-permute(send-data),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
const char* kExpected = R"(
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID:[0-9]+]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[CUSTOM_CALL:.*]] = custom-call
CHECK: %[[AFTER_ALL:.*]] = after-all
CHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]}
CHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[RESULT_RECV]]}
CHECK: ROOT
CHECK-SAME: %[[RESULT_RECV]]
CHECK: ENTRY
CHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all
CHECK: %[[ENTRY_RECV:.*]] = recv(%[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}
CHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},
CHECK-SAME: control-predecessors={%[[ENTRY_RECV]]}
CHECK: %[[WHILE_INIT:.*]] = tuple
CHECK-SAME: %[[ENTRY_SEND]]
CHECK: while(%[[WHILE_INIT]])
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
)";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);
debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1);
debug_options.set_xla_gpu_enable_pipelined_p2p(true);
debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
TF_ASSERT_OK(Schedule(optimized_module.get()));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
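// Fixture for the on-disk compiled-kernel cache (xla_gpu_kernel_cache_file).
// Skips all tests when the backend cannot link modules, since caching
// compiled kernels requires linking support.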
class KernelCacheTest : public HloTestBase {
public:
void SetUp() override {
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_));
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules,
dynamic_cast<GpuCompiler*>(backend().compiler())
->CanUseLinkModules(config));
if (!can_use_link_modules) {
GTEST_SKIP() << "Caching compiled kernels requires support of linking.";
}
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_);
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true);
return debug_options;
}
bool CacheFileExists() {
if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) {
return false;
}
return true;
}
int CacheEntryCount() {
if (!CacheFileExists()) {
return 0;
}
std::string serialized;
TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_,
&serialized));
CompilationCacheProto proto;
    EXPECT_TRUE(proto.ParseFromString(serialized));
return proto.entries_size();
}
std::string cache_file_name_;
static constexpr absl::string_view kHloText = R"(
ENTRY e {
p = s8[] parameter(0)
c = s8[] constant(8)
ROOT _ = s8[] add(p, c)
})";
};
TEST_F(KernelCacheTest, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
TEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
a = f32[5,5] parameter(0)
ROOT _ = f32[5,5] custom-call(a, a), custom_call_target="__cublas$gemm",
backend_config="{ \"gemm_backend_config\": {\"alpha_real\":1,\"beta\":0,\"dot_dimension_numbers\":{\"lhs_contracting_dimensions\":[\"1\"],\"rhs_contracting_dimensions\":[\"0\"],\"lhs_batch_dimensions\":[],\"rhs_batch_dimensions\":[]},\"alpha_imag\":0,\"precision_config\":{\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]},\"epilogue\":\"DEFAULT\"}}"
})",
false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(KernelCacheTest, CacheGrowsWithNewKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] multiply(p, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 2);
}
TEST_F(KernelCacheTest, AllKernelsAreCachedBecauseSplitModuleUsesRoundRobin) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
n = s8[] negate(p)
a = s8[] add(n, n)
s = s8[] subtract(p, a)
ROOT _ = s8[] multiply(s, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 4);
}
TEST_F(KernelCacheTest, CachingWorksWithLoadedExecutables) {
const std::string kHloAdd1 = R"(
add1 {
p = s32[] parameter(0)
c = s32[] constant(1)
ROOT a = s32[] add(p, c)
}
ENTRY e {
p = s32[] parameter(0)
ROOT r = s32[] fusion(p), kind=kLoop, calls=add1
})";
const std::string kHloAdd2 = R"(
add2 {
p = s32[] parameter(0)
c = s32[] constant(2)
ROOT a = s32[] add(p, c)
}
ENTRY e {
p = s32[] parameter(0)
ROOT r = s32[] fusion(p), kind=kLoop, calls=add2
})";
TF_ASSERT_OK_AND_ASSIGN(se::Platform * platform,
se::PlatformManager::PlatformWithName("cuda"));
TF_ASSERT_OK_AND_ASSIGN(se::StreamExecutor * stream_exec,
platform->ExecutorForDevice(0));
Compiler* compiler = backend().compiler();
AotCompilationOptions aot_options(compiler->PlatformId());
aot_options.set_executor(stream_exec);
auto test = [this, &compiler, &aot_options](absl::string_view hlo, int input,
int expected_result) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto module_group = std::make_unique<HloModuleGroup>(std::move(module));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<std::unique_ptr<AotCompilationResult>> aot_results,
compiler->CompileAheadOfTime(std::move(module_group), aot_options));
TF_ASSERT_OK_AND_ASSIGN(std::string serialized_aot_result,
aot_results[0]->SerializeAsString());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<AotCompilationResult> aot_result,
compiler->LoadAotCompilationResult(serialized_aot_result));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Executable> executable,
aot_result->LoadExecutable(compiler, aot_options.executor()));
const xla::Literal literal_input =
xla::LiteralUtil::CreateR0<int32_t>(input);
const xla::Literal literal_expected_result =
xla::LiteralUtil::CreateR0<int32_t>(expected_result);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
GetHloRunner().value()->ExecuteWithExecutable(
executable.get(), {&literal_input}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, literal_expected_result));
};
test(kHloAdd1, 1, 2);
test(kHloAdd2, 1, 3);
test(kHloAdd2, 1, 3);
}
class KernelCacheTestSingleThreaded : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_force_compilation_parallelism(1);
return debug_options;
}
};
TEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
class NoKernelCacheTest : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
return debug_options;
}
};
TEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) {
EXPECT_TRUE(Run(kHloText, false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(GpuCompilerTest, TestFlag_xla_gpu_unsafe_pipelined_loop_annotator) {
const char* hlo = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
})";
const char* kExpected = R"(
)";
DebugOptions debug_options;
HloModuleConfig config;
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);
config.set_debug_options(debug_options);
config.set_num_partitions(4);
config.set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(auto unoptimized_module,
ParseAndReturnVerifiedModule(hlo, config));
TF_ASSERT_OK_AND_ASSIGN(auto optimized_module,
GetOptimizedModule(std::move(unoptimized_module)));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
using GpuCompilerPassTest = GpuCompilerTest;
TEST_F(GpuCompilerPassTest,
GpuCompilerRunsTritonGemmRewriterByDefaultFromAmpere) {
if (std::holds_alternative<se::RocmComputeCapability>(GpuComputeComp())) {
GTEST_SKIP() << "TritonGemmRewriter disabled for ROCm until autotuner "
<< "is included.";
}
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
bool is_rocm = std::holds_alternative<stream_executor::RocmComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
bool expect_triton_gemm_rewriter_has_run = cc.IsAtLeastAmpere() || is_rocm;
constexpr absl::string_view constant_module = R"(
HloModule noop
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
const HloModuleMetadataProto& module_metadata =
optimized_module->metadata()->proto();
bool triton_gemm_rewriter_has_run = false;
for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {
triton_gemm_rewriter_has_run |=
pass_metadata.pass_name() == "triton-gemm-rewriter";
}
EXPECT_EQ(triton_gemm_rewriter_has_run, expect_triton_gemm_rewriter_has_run);
}
TEST_F(GpuCompilerPassTest,
GpuCompilerRunsCustomKernelFusionByDefaultFromVolta) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
bool expect_custom_kernel_fusion_rewriter_has_run =
cc.major == se::CudaComputeCapability::VOLTA;
constexpr absl::string_view constant_module = R"(
HloModule noop
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
const HloModuleMetadataProto& module_metadata =
optimized_module->metadata()->proto();
bool custom_kernel_fusion_rewriter_has_run = false;
for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {
custom_kernel_fusion_rewriter_has_run |=
pass_metadata.pass_name() == "custom-kernel-fusion-rewriter";
}
EXPECT_EQ(custom_kernel_fusion_rewriter_has_run,
expect_custom_kernel_fusion_rewriter_has_run);
}
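// First and last position at which a pass appears in a module's recorded
// pass metadata.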
struct PassRunIndex {
int first_run = std::numeric_limits<int>::max();
int last_run = std::numeric_limits<int>::min();
};
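// Asserts that every run of `before` finished before the first run of
// `after`.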
void VerifyPassOrder(
const absl::flat_hash_map<std::string, PassRunIndex>& passes,
absl::string_view before, absl::string_view after) {
ASSERT_TRUE(passes.contains(before))
<< "Expected pass did not run: " << before;
ASSERT_TRUE(passes.contains(after)) << "Expected pass did not run: " << after;
EXPECT_LT(passes.at(before).last_run, passes.at(after).first_run)
<< "Pass " << before << " ran after " << after;
}
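// Maps each pass name to the first and last index at which it ran, according
// to the module's pass metadata.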
absl::flat_hash_map<std::string, PassRunIndex> GatherPassOrderInformation(
const HloModule& module) {
absl::flat_hash_map<std::string, PassRunIndex> passes;
int run_index = 0;
for (const HloPassMetadata& pass_metadata :
module.metadata().proto().pass_metadata()) {
auto& pass = passes[pass_metadata.pass_name()];
pass.first_run = std::min(pass.first_run, run_index);
pass.last_run = std::max(pass.last_run, run_index);
++run_index;
}
return passes;
}
TEST_F(GpuCompilerPassTest, PassesAreRunInCorrectOrder) {
constexpr absl::string_view constant_module = R"(
ENTRY main {
ROOT constant = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
absl::flat_hash_map<std::string, PassRunIndex> passes =
GatherPassOrderInformation(*optimized_module);
VerifyPassOrder(passes, "layout-assignment",
"priority-fusion");
VerifyPassOrder(passes, "layout-assignment",
"layout_normalization");
VerifyPassOrder(passes, "host-offload-legalize",
"layout_normalization");
}
TEST_F(GpuCompilerPassTest, FusionBlockLevelRewriterRunsAfterAllFusionPasses) {
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "FusionBlockLevelRewriter requires Ampere+ to run.";
}
constexpr absl::string_view constant_module = R"(
ENTRY main {
ROOT constant = f32[] constant(0)
})";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_experimental_enable_fusion_block_level_rewriter(
true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(constant_module, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
absl::flat_hash_map<std::string, PassRunIndex> passes =
GatherPassOrderInformation(*optimized_module);
absl::string_view kFusionBlockLevelRewriterName =
"fusion-block-level-rewriter";
for (const auto& [pass_name, _] : passes) {
if (pass_name != kFusionBlockLevelRewriterName &&
absl::StrContains(pass_name, "fusion")) {
VerifyPassOrder(passes, pass_name,
kFusionBlockLevelRewriterName);
VLOG(2) << "Verified pass order: " << pass_name << " -> "
<< kFusionBlockLevelRewriterName;
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |