ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
26ad6729-c38a-4ff8-bc05-3336cfc5b0f0 | cpp | tensorflow/tensorflow | hlo_op_profiler | third_party/xla/xla/service/gpu/model/hlo_op_profiler.cc | third_party/xla/xla/service/gpu/model/hlo_op_profiler_test.cc | #include "xla/service/gpu/model/hlo_op_profiler.h"
#include <cstdint>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#ifdef GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#endif
namespace xla {
namespace gpu {
#ifdef GOOGLE_CUDA
class CuptiKernelTracer : public profiler::CuptiTraceCollector {
public:
CuptiKernelTracer()
: profiler::CuptiTraceCollector({}),
cupti_tracer_(profiler::CuptiTracer::GetCuptiTracerSingleton()) {
CHECK(cupti_tracer_->IsAvailable());
profiler::CuptiTracerOptions options;
options.cbids_selected.push_back(
CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
cupti_tracer_->Enable(options, this);
}
uint64_t getMedianKernelTimeNs() && {
cupti_tracer_->Disable();
if (kernel_times_ns_.empty()) {
LOG(ERROR) << "No kernel events";
return 0;
}
std::sort(kernel_times_ns_.begin(), kernel_times_ns_.end());
size_t i = kernel_times_ns_.size() / 2;
if (kernel_times_ns_.size() % 2 != 0) {
return kernel_times_ns_[i];
}
return (kernel_times_ns_[i - 1] + kernel_times_ns_[i] + 1) / 2;
}
private:
void AddEvent(profiler::CuptiTracerEvent&& event) override {
if (event.type == profiler::CuptiTracerEventType::Kernel) {
kernel_times_ns_.push_back(event.end_time_ns - event.start_time_ns);
}
VLOG(5) << "CuptiTracerEvent: " << event.name << ", "
<< event.end_time_ns - event.start_time_ns << "ns";
}
void OnEventsDropped(const std::string& reason,
uint32_t num_events) override {
LOG(WARNING) << "Dropped " << num_events << " events: " << reason;
}
void Flush() override {}
profiler::CuptiTracer* cupti_tracer_;
std::vector<uint64_t> kernel_times_ns_;
};
#else
class CuptiKernelTracer {
public:
uint64_t getMedianKernelTimeNs() && {
LOG(FATAL) << "Not built with --config=cuda";
}
};
#endif
std::unique_ptr<HloModule> HloOpProfiler::MakeModuleForMeasurements(
HloOpcode op, PrimitiveType data_type, int chain_length) {
constexpr int64_t kInputSize = 1;
const Shape shape = ShapeUtil::MakeShape(data_type, {kInputSize});
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
auto module = std::make_unique<HloModule>("module", config);
HloComputation::Builder entry_builder("entry");
HloComputation::Builder fusion_builder("fusion");
HloInstruction* pf = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "pf"));
HloInstruction* last = pf;
for (int i = 0; i < chain_length; ++i) {
switch (HloOpcodeArity(op).value_or(0)) {
case 1:
last = fusion_builder.AddInstruction(
HloInstruction::CreateUnary(shape, op, last));
break;
case 2:
last = fusion_builder.AddInstruction(
HloInstruction::CreateBinary(shape, op, last, pf));
break;
default:
LOG(FATAL) << "Unsupported opcode: " << HloOpcodeString(op);
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
entry_builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0}, subcomp));
module->AddEntryComputation(entry_builder.Build());
VLOG(9) << module->ToString();
return module;
}
absl::StatusOr<absl::Duration> HloOpProfiler::MeasureOpChainDuration(
HloOpcode op, PrimitiveType data_type, int chain_length) {
#ifndef GOOGLE_CUDA
return FailedPrecondition("Not built with --config=cuda");
#endif
std::unique_ptr<HloModule> module =
MakeModuleForMeasurements(op, data_type, chain_length);
std::minstd_rand0 engine;
std::vector<Literal> args_small = MakeFakeArguments(module.get(), &engine,
false)
.value();
std::vector<Literal> args_large = MakeFakeArguments(module.get(), &engine,
true)
.value();
const absl::Time t_compile_start = absl::Now();
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> ex,
runner_.CreateExecutable(std::move(module),
false));
if (absl::Now() - t_compile_start > absl::Seconds(10)) {
return ResourceExhausted("Too slow compilation");
}
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
CuptiKernelTracer cupti_tracer;
for (int i = 0; i < 10; ++i) {
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_large).status());
}
return absl::Nanoseconds(std::move(cupti_tracer).getMedianKernelTimeNs());
}
HloOpProfiler::HloOpProfiler(HloRunner& runner)
: runner_(runner),
dev_info_(runner.backend().stream_executors()[0]->GetDeviceDescription()),
min_duration_(2 * MeasureOpChainDuration(HloOpcode::kNegate, F32, 0)
.value_or(absl::ZeroDuration())) {
VLOG(3) << "Minimum kernel duration: " << min_duration_;
CHECK_GT(min_duration_, absl::ZeroDuration())
<< "Failed to measure kernel runtime";
}
absl::StatusOr<HloInstructionProfile> HloOpProfiler::MeasureClockCyclesPerOp(
HloOpcode op, PrimitiveType data_type) {
VLOG(2) << "Measuring " << HloOpcodeString(op) << " "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
constexpr int kMinOpChainLength = 16;
constexpr int kMaxOpChainLength = 8192;
absl::Duration duration = absl::ZeroDuration();
int chain_length = kMinOpChainLength;
do {
if (chain_length * 2 > kMaxOpChainLength) {
return FailedPrecondition("%s is too fast to measure",
HloOpcodeString(op));
}
TF_ASSIGN_OR_RETURN(duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << duration;
chain_length *= 2;
} while (duration < min_duration_);
TF_ASSIGN_OR_RETURN(absl::Duration double_duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << double_duration;
const absl::Duration time_per_op =
(double_duration - duration) * 2.0 / chain_length;
const float clocks_per_nanosecond =
dev_info_.clock_rate_ghz() * 2;
const int64_t n_clocks =
absl::ToInt64Nanoseconds(time_per_op) * clocks_per_nanosecond;
VLOG(3) << time_per_op << " = " << n_clocks << " clock cycles";
HloInstructionProfile profile;
profile.mutable_instruction()->mutable_opcode()->assign(HloOpcodeString(op));
profile.mutable_instruction()->mutable_shape()->set_element_type(data_type);
profile.set_clock_cycles(n_clocks);
return profile;
}
}
} | #include "xla/service/gpu/model/hlo_op_profiler.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using HloOpProfilerTest = HloTestBase;
TEST_F(HloOpProfilerTest, BasicMeasurementsAreCorrect) {
#ifndef GOOGLE_CUDA
GTEST_SKIP() << "Not built with --config=cuda";
#endif
HloOpProfiler profiler(test_runner_);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kAdd, F32)
.value()
.clock_cycles(),
0);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kDivide, F64)
.value()
.clock_cycles(),
300);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kSqrt, C128)
.value()
.clock_cycles(),
1000);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e92a8e9-0fdc-4b80-93d8-bd223b973557 | cpp | tensorflow/tensorflow | gpu_cost_model_stats_collection | third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection.cc | third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection_test.cc | #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> GpuCostModelStatsCollection::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto* computation : module->MakeComputationPostOrder()) {
TF_CHECK_OK(computation->Accept(&cost_analysis_));
for (auto* fusion_instr : computation->instructions()) {
if (fusion_instr->opcode() != HloOpcode::kFusion) continue;
GpuPerformanceModel::RecordEstimatedRunTime(
fusion_instr, device_info_, &cost_analysis_,
GpuPerformanceModelOptions::ForModule(module));
}
}
return false;
}
}
} | #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include <stdint.h>
#include <memory>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class GpuCostModelStatsCollectionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuCostModelStatsCollection cost_model_stats_{
TestGpuDeviceInfo::RTXA6000DeviceInfo(),
GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
{},
{},
true}};
};
TEST_F(GpuCostModelStatsCollectionTest, FusinInEntryComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
)"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
TEST_F(GpuCostModelStatsCollectionTest, FusinInWhileComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
cond {
p = f32[16384]{0} parameter(0)
ROOT %constant.2 = pred[] constant(true)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
loop {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %while = f32[16384] while(%p0), body=%loop, condition=%cond
})"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_cost_model_stats_collection_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
170f268f-ba87-4ee9-b093-b6449fd5a01a | cpp | tensorflow/tensorflow | gpu_performance_model_base | third_party/xla/xla/service/gpu/model/gpu_performance_model_base.cc | third_party/xla/xla/service/gpu/model/gpu_performance_model_base_test.cc | #include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/fusions/triton.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
bool FusionUsesParameterElementwiseFromRoot(
const HloInstruction* fusion, int parameter_index,
const GpuHloCostAnalysis* cost_analysis) {
return cost_analysis->CommonElementwiseUtilization(
fusion->fused_parameter(parameter_index),
fusion->fused_expression_root()) == 1.f;
}
int GetCoalescingWasteFactor(PrimitiveType element_type,
const se::DeviceDescription& gpu_device_info) {
int64_t element_size_bytes =
element_type == PrimitiveType::TUPLE ||
element_type == PrimitiveType::TOKEN
? 4
: ShapeUtil::ByteSizeOfPrimitiveType(element_type);
return gpu_device_info.dram_to_l2_transaction_size_bytes() /
element_size_bytes;
}
float AdjustBandwidth(const se::DeviceDescription& gpu_device_info,
float bandwidth, int64_t num_blocks) {
float per_block_bandwidth = gpu_device_info.clock_rate_ghz() * 1.0e9f *
gpu_device_info.memory_transactions_per_clock();
float max_bandwidth = num_blocks * per_block_bandwidth;
return std::min(bandwidth, max_bandwidth);
}
}
std::optional<EstimateRunTimeData> GpuPerformanceModelCache::Get(
const HloInstruction& instruction) {
auto it = instruction_runtime_data_.find(&instruction);
if (it != instruction_runtime_data_.end()) {
return it->second;
}
return std::nullopt;
}
std::optional<absl::Duration> GpuPerformanceModelCache::Get(
const HloInstruction& producer, const HloInstruction& consumer) {
absl::MutexLock lock(&mutex_);
auto it = fusion_runtime_data_.find(&producer);
if (it != fusion_runtime_data_.end()) {
auto jt = it->second.find(&consumer);
if (jt != it->second.end()) {
return jt->second;
}
}
return std::nullopt;
}
const absl::flat_hash_map<const HloInstruction*, absl::Duration>&
GpuPerformanceModelCache::GetAllConsumers(const HloInstruction& producer) {
return fusion_runtime_data_[&producer];
}
bool GpuPerformanceModelCache::ContainsConsumers(
const HloInstruction& producer) {
return fusion_runtime_data_.contains(&producer);
}
void GpuPerformanceModelCache::Set(const HloInstruction& instruction,
const EstimateRunTimeData& runtime_data) {
instruction_runtime_data_[&instruction] = runtime_data;
}
void GpuPerformanceModelCache::Set(const HloInstruction& producer,
const HloInstruction& consumer,
absl::Duration runtime) {
absl::MutexLock lock(&mutex_);
fusion_runtime_data_[&producer][&consumer] = runtime;
}
void GpuPerformanceModelCache::Invalidate(const HloInstruction& instruction) {
instruction_runtime_data_.erase(&instruction);
fusion_runtime_data_.erase(&instruction);
for (auto* operand : instruction.operands()) {
if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->mutable_operand(0);
}
auto it = fusion_runtime_data_.find(operand);
if (it != fusion_runtime_data_.end()) {
it->second.erase(&instruction);
}
}
}
LaunchDimensions GpuPerformanceModelBase::EstimateFusionLaunchDimensions(
const HloFusionAnalysis& fusion_analysis) {
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{fusion_analysis});
if (const auto* kernel_emitter =
dynamic_cast<const KernelFusionInterface*>(emitter.get())) {
return kernel_emitter->launch_dimensions();
}
if (const auto* triton_emitter =
dynamic_cast<const TritonFusion*>(emitter.get())) {
if (auto launch_config = triton_emitter->launch_config()) {
return launch_config->launch_dimensions;
}
}
VLOG(5) << "Using fallback launch dimensions estimate for "
<< fusion_analysis.fusion().ToString();
int64_t num_threads_per_block = 128;
int64_t estimated_num_threads =
ShapeUtil::ElementsInRecursive(fusion_analysis.fusion_root(0).shape());
int64_t num_blocks =
CeilOfRatio(estimated_num_threads, num_threads_per_block);
return LaunchDimensions(num_blocks, num_threads_per_block);
}
int64_t GpuPerformanceModelBase::GetOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (!instr->IsUserOf(operand)) {
return 0;
}
return cost_analysis->operand_bytes_accessed(*instr,
instr->operand_index(operand));
}
float GpuPerformanceModelBase::GetOperandUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (operand->IsMultiOutputFusion()) {
float res = 0.f;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (instr->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(i)->operand(0) == operand) {
res += cost_analysis->operand_utilization(*instr, i);
}
}
return res;
}
if (!instr->IsUserOf(operand)) {
return 0.f;
}
return cost_analysis->operand_utilization(*instr,
instr->operand_index(operand));
}
float GpuPerformanceModelBase::GetCommonUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
int64_t producer_idx_of_operand, const HloInstruction* consumer) {
const auto* operand = producer->operand(producer_idx_of_operand);
if (!consumer || !consumer->IsUserOf(operand)) {
return 0.f;
}
if (producer->IsElementwise() ||
(producer->opcode() == HloOpcode::kFusion &&
FusionUsesParameterElementwiseFromRoot(producer, producer_idx_of_operand,
cost_analysis))) {
if (consumer->opcode() == HloOpcode::kFusion) {
int64_t consumer_idx_of_common_operand = consumer->operand_index(operand);
float res = 0.f;
std::vector<int64_t> consumer_indices_of_producer;
if (producer->IsMultiOutputFusion()) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
consumer->operand(i)->operand(0) == producer) {
consumer_indices_of_producer.push_back(i);
}
}
} else {
consumer_indices_of_producer.push_back(
consumer->operand_index(producer));
}
for (int64_t consumer_idx_of_producer : consumer_indices_of_producer) {
res += cost_analysis->CommonElementwiseUtilization(
consumer->fused_parameter(consumer_idx_of_common_operand),
consumer->fused_parameter(consumer_idx_of_producer));
}
return res;
} else if (consumer->IsElementwise()) {
return 1.f;
}
}
return 0.f;
}
int64_t GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
const HloInstruction* consumer, const HloInstruction* operand) {
float producer_utilization_by_consumer =
GetOperandUtilization(cost_analysis, consumer, producer);
int64_t bytes_accessed_by_producer =
GetOperandBytesAccessed(cost_analysis, producer, operand);
int64_t bytes_accessed_by_consumer =
GetOperandBytesAccessed(cost_analysis, consumer, operand);
float common_utilization =
producer->IsUserOf(operand)
? GetCommonUtilization(cost_analysis, producer,
producer->operand_index(operand), consumer)
: 0.f;
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t common_bytes_accessed =
std::llround(operand_size * common_utilization);
return std::llround(bytes_accessed_by_producer *
producer_utilization_by_consumer) +
bytes_accessed_by_consumer - common_bytes_accessed;
}
absl::Duration GpuPerformanceModelBase::ReadTime(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total) {
float bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
bandwidth *= kL1CacheSpeedup;
}
}
bandwidth = AdjustBandwidth(gpu_device_info, bandwidth, num_blocks);
return absl::Seconds(n_bytes_total / bandwidth);
}
absl::Duration GpuPerformanceModelBase::ReadTimeWithDRAMHeuristic(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total, PrimitiveType element_type,
bool coalesced) {
int waste_factor =
coalesced ? 1 : GetCoalescingWasteFactor(element_type, gpu_device_info);
float dram_bandwidth = gpu_device_info.memory_bandwidth() / waste_factor;
float rest_bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
rest_bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
rest_bandwidth *= kL1CacheSpeedup;
}
} else {
rest_bandwidth /= waste_factor;
}
dram_bandwidth = AdjustBandwidth(gpu_device_info, dram_bandwidth, num_blocks);
rest_bandwidth = AdjustBandwidth(gpu_device_info, rest_bandwidth, num_blocks);
int64_t n_bytes_read_dram = std::min(n_bytes_net, n_bytes_total);
int64_t n_bytes_read_cache = n_bytes_total - n_bytes_read_dram;
return absl::Seconds(n_bytes_read_dram / dram_bandwidth) +
absl::Seconds(n_bytes_read_cache / rest_bandwidth);
}
absl::Duration GpuPerformanceModelBase::ProducerInputAccessTime(
const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
const HloInstruction* producer, const HloFusionAnalysis& fusion_analysis,
const GpuPerformanceModelOptions& config,
const HloInstruction* fused_consumer) {
absl::Duration ret = absl::ZeroDuration();
float producer_output_utilization =
fused_consumer
? GetOperandUtilization(cost_analysis, fused_consumer, producer)
: 1.f;
for (int i = 0; i < producer->operand_count(); ++i) {
int64_t operand_bytes_accessed =
cost_analysis->operand_bytes_accessed(*producer, i);
float operand_utilization =
cost_analysis->operand_utilization(*producer, i);
int64_t n_bytes_net = std::llround(operand_bytes_accessed /
std::max(operand_utilization, 1.0f));
float common_utilization = GetCommonUtilization(
cost_analysis, producer, i, fused_consumer);
CHECK_LE(common_utilization, producer_output_utilization);
float n_bytes_total = operand_bytes_accessed *
(producer_output_utilization - common_utilization);
ret += ReadTime(gpu_device_info, num_blocks, n_bytes_net, n_bytes_total);
}
return ret;
}
absl::Duration GpuPerformanceModelBase::WriteTime(
const se::DeviceDescription& gpu_device_info, int64_t bytes_written) {
return absl::Seconds(1.0f * bytes_written /
gpu_device_info.memory_bandwidth());
}
absl::Duration GpuPerformanceModelBase::ComputeTime(
const se::DeviceDescription& gpu_device_info, int64_t flops,
int64_t num_blocks, int64_t num_threads_per_block) {
int64_t n_active_fpus_per_core =
std::min<int64_t>(num_threads_per_block, gpu_device_info.fpus_per_core());
int64_t n_active_core =
std::min<int64_t>(num_blocks, gpu_device_info.core_count());
int64_t fpu_count = n_active_core * n_active_fpus_per_core;
int64_t flop_per_ns_per_fpu = gpu_device_info.clock_rate_ghz() * 2;
int64_t flop_per_ns_effective = flop_per_ns_per_fpu * fpu_count;
return absl::Nanoseconds(1.0f * flops / flop_per_ns_effective);
}
absl::Duration GpuPerformanceModelBase::CombineComputeAndMemoryAccessTime(
absl::Duration compute_time, absl::Duration memory_access_time,
const GpuPerformanceModelOptions& config) {
return compute_time + memory_access_time -
std::min(compute_time, memory_access_time) *
config.memory_compute_parallelism;
}
void GpuPerformanceModelBase::VLogOperandRead(const HloInstruction* operand,
int64_t n_bytes_total,
int64_t n_bytes_net,
bool coalesced) {
VLOG(8) << "operand " << operand->name()
<< ", n_bytes_total: " << n_bytes_total
<< ", n_bytes_net: " << n_bytes_net << ", coalesced: " << coalesced;
}
}
} | #include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class GpuPerformanceModelBaseTest : public HloTestBase {
public:
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
GpuHloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
{},
true};
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
GpuHloCostAnalysis analysis_{options_, device_info_};
GpuPerformanceModelBaseTest() : HloTestBase() {}
};
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_InPlaceDUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[4,4] log(param_1)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(param_0, log, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(1);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(0)), 0);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 64);
}
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_DUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[8,16] log(param_0)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(log, param_1, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(0);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(1)), 64);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 448);
}
TEST_F(GpuPerformanceModelBaseTest,
ReduceBroadcastedDim_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
f1 {
p0 = f32[128] parameter(0)
c0 = f32[] constant(0)
broadcast = f32[128,256] broadcast(p0), dimensions={0}
ROOT reduce = f32[128] reduce(broadcast, c0), dimensions={1}, to_apply=add
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
param_1 = f32[4,4] parameter(1)
ROOT fusion = f32[128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
131072);
}
TEST_F(GpuPerformanceModelBaseTest, ElementwiseBitcast_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[128] parameter(0)
bitcast.1 = f32[8,16] bitcast(p0)
log = f32[128] log(p0)
bitcast.2 = f32[8,16] bitcast(log)
ROOT add = f32[8,16] add(bitcast.1, bitcast.2)
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
ROOT fusion = f32[8,16] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
1024);
}
TEST_F(GpuPerformanceModelBaseTest, EstimateFusionLaunchDimensions_LoopFusion) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[8,16,128] parameter(0)
log = f32[8,16,128] log(p0)
ROOT add = f32[8,16,128] add(p0, log)
}
ENTRY entry_computation {
param_0 = f32[8,16,128] parameter(0)
ROOT fusion = f32[8,16,128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 128);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 128);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_TritonSoftMaxFusion) {
absl::string_view hlo_string = R"(
max {
p1 = f32[] parameter(1)
p0 = f32[] parameter(0)
ROOT m = f32[] maximum(p0, p1)
}
triton_softmax_computation {
p0 = f32[16,970] parameter(0)
constant = f32[] constant(-inf)
reduce = f32[16] reduce(p0, constant), dimensions={1}, to_apply=max
broadcast = f32[16,970] broadcast(reduce), dimensions={0}
ROOT subtract = f32[16,970] subtract(p0, broadcast)
}
ENTRY e {
p0 = f32[16,970]{1,0} parameter(0)
ROOT r = f32[16,970]{1,0} fusion(p0), kind=kCustom,
calls=triton_softmax_computation,
backend_config={"fusion_backend_config": {kind: "__triton","block_level_fusion_config":{"output_tile_sizes":["1","970"],"num_warps":"2"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 16);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 64);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_CudnnFusion) {
absl::string_view hlo_string = R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT r = f32[32,256] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT _ = f32[32,256] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 64);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 128);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c981c3cc-c1bc-48ff-92ea-e891c374416e | cpp | tensorflow/tensorflow | indexing_map_serialization | third_party/xla/xla/service/gpu/model/indexing_map_serialization.cc | third_party/xla/xla/service/gpu/model/indexing_map_serialization_test.cc | #include "xla/service/gpu/model/indexing_map_serialization.h"
#include <algorithm>
#include <cctype>
#include <cstdint>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/AsmParser/AsmParser.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using llvm::SmallVectorImpl;
using llvm::StringRef;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineMapAttr;
using mlir::AffineSymbolExpr;
using mlir::ArrayRef;
using mlir::MLIRContext;
enum class Delimeter { kParen, kBracket };
struct Token {
enum class Kind {
kVarName,
kIntLiteral,
kBoolLiteral,
kKeywordDomain,
kKeywordIn,
kKeywordIsSimplified,
kPlus,
kMinus,
kTimes,
kFloorDiv,
kMod,
kArrow,
kLParen,
kRParen,
kLBracket,
kRBracket,
kComma,
kColon,
kError,
kEOF
};
StringRef spelling;
Token::Kind kind;
};
Token::Kind GetSingleCharTokenType(char c) {
switch (c) {
case '(':
return Token::Kind::kLParen;
case ')':
return Token::Kind::kRParen;
case '[':
return Token::Kind::kLBracket;
case ']':
return Token::Kind::kRBracket;
case ',':
return Token::Kind::kComma;
case ':':
return Token::Kind::kColon;
case '+':
return Token::Kind::kPlus;
case '-':
return Token::Kind::kMinus;
case '*':
return Token::Kind::kTimes;
default:
return Token::Kind::kError;
}
}
bool IsPartOfAffineExpr(Token token) {
return token.kind == Token::Kind::kVarName ||
token.kind == Token::Kind::kIntLiteral ||
token.kind == Token::Kind::kPlus ||
token.kind == Token::Kind::kMinus ||
token.kind == Token::Kind::kTimes ||
token.kind == Token::Kind::kFloorDiv ||
token.kind == Token::Kind::kMod;
}
class Parser {
public:
explicit Parser(llvm::StringRef input) : input_(input), it_(input.begin()) {
current_token_ = GetNextTokenImpl();
}
const Token& GetCurrentToken() const { return current_token_; };
void Advance() {
if (current_token_.kind == Token::Kind::kError ||
current_token_.kind == Token::Kind::kEOF) {
return;
}
current_token_ = GetNextTokenImpl();
}
Token GetNextToken() {
Advance();
return current_token_;
}
bool ConsumeToken(Token::Kind kind);
bool ParseVarName(std::string* var_name);
bool ParseInt(int64_t* value);
bool ParseBool(bool* boolean);
bool ParseInterval(Interval* interval);
bool ParseAffineExprString(std::string* affine_expr_str);
bool ParseCommaSeparatedVarList(
Delimeter delimeter,
llvm::function_ref<bool(Parser& parser)> parse_element_fn);
private:
void ConsumeWhitespace() {
while (it_ != input_.end() && std::isspace(*it_)) ++it_;
}
Token GetNextTokenImpl();
llvm::StringRef input_;
llvm::StringRef::iterator it_;
Token current_token_;
};
bool Parser::ParseVarName(std::string* var_name) {
if (current_token_.kind != Token::Kind::kVarName) {
llvm::errs() << "Expected var name, got: " << current_token_.spelling
<< "\n";
return false;
}
*var_name = current_token_.spelling.str();
Advance();
return true;
}
bool Parser::ParseInt(int64_t* value) {
int val;
if (current_token_.kind != Token::Kind::kIntLiteral ||
current_token_.spelling.getAsInteger(0, val)) {
llvm::errs() << "Expected int literal, got: " << current_token_.spelling
<< "\n";
return false;
}
*value = static_cast<int64_t>(val);
Advance();
return true;
}
bool Parser::ParseBool(bool* boolean) {
if (current_token_.kind != Token::Kind::kBoolLiteral) {
llvm::errs() << "Expected bool literal, got: " << current_token_.spelling
<< "\n";
return false;
}
*boolean = current_token_.spelling.compare("true") == 0;
Advance();
return true;
}
bool Parser::ParseInterval(Interval* interval) {
if (!ConsumeToken(Token::Kind::kLBracket) || !ParseInt(&interval->lower) ||
!ConsumeToken(Token::Kind::kComma) || !ParseInt(&interval->upper) ||
!ConsumeToken(Token::Kind::kRBracket)) {
return false;
}
return interval;
}
bool Parser::ParseAffineExprString(std::string* affine_expr_str) {
unsigned num_unmatched_parens = 0;
while (true) {
if (IsPartOfAffineExpr(current_token_)) {
affine_expr_str->append(current_token_.spelling);
affine_expr_str->push_back(' ');
Advance();
continue;
}
if (ConsumeToken(Token::Kind::kLParen)) {
affine_expr_str->push_back('(');
++num_unmatched_parens;
continue;
}
if (current_token_.kind == Token::Kind::kRParen &&
num_unmatched_parens > 0) {
affine_expr_str->push_back(')');
--num_unmatched_parens;
Advance();
continue;
}
break;
}
return current_token_.kind != Token::Kind::kError;
}
bool Parser::ParseCommaSeparatedVarList(
Delimeter delimeter,
llvm::function_ref<bool(Parser& parser)> parse_element_fn) {
auto left_delimiter = delimeter == Delimeter::kParen ? Token::Kind::kLParen
: Token::Kind::kLBracket;
auto right_delimiter = delimeter == Delimeter::kParen
? Token::Kind::kRParen
: Token::Kind::kRBracket;
if (!ConsumeToken(left_delimiter)) {
return false;
}
if (ConsumeToken(right_delimiter)) {
return true;
}
std::string element;
while (parse_element_fn(*this)) {
if (ConsumeToken(Token::Kind::kComma)) continue;
return ConsumeToken(right_delimiter);
}
return false;
}
bool Parser::ConsumeToken(Token::Kind kind) {
Token token = GetCurrentToken();
if (token.kind != kind) {
return false;
}
GetNextToken();
return true;
}
Token Parser::GetNextTokenImpl() {
ConsumeWhitespace();
if (it_ == input_.end()) {
return Token{"", Token::Kind::kEOF};
}
auto start = it_;
if (std::isalpha(*it_)) {
while (it_ != input_.end() &&
(std::isalpha(*it_) || std::isdigit(*it_) || *it_ == '_')) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
if (spelling == "true" || spelling == "false") {
return Token{spelling, Token::Kind::kBoolLiteral};
}
if (spelling == "domain") {
return Token{spelling, Token::Kind::kKeywordDomain};
}
if (spelling == "in") {
return Token{spelling, Token::Kind::kKeywordIn};
}
if (spelling == "mod") {
return Token{spelling, Token::Kind::kMod};
}
if (spelling == "floorDiv") {
return Token{spelling, Token::Kind::kFloorDiv};
}
return Token{spelling, Token::Kind::kVarName};
}
if (std::isdigit(*it_)) {
auto start = it_;
while (it_ != input_.end() && std::isdigit(*it_)) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
return Token{spelling, Token::Kind::kIntLiteral};
}
if (*it_ == '-') {
++it_;
if (it_ != input_.end()) {
if (*it_ == '>') {
++it_;
return Token{"->", Token::Kind::kArrow};
} else if (std::isdigit(*it_)) {
auto start = it_ - 1;
while (it_ != input_.end() && std::isdigit(*it_)) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
return Token{spelling, Token::Kind::kIntLiteral};
} else {
return Token{"-", Token::Kind::kMinus};
}
}
}
StringRef spelling = input_.substr(start - input_.data(), 1);
return Token{spelling, GetSingleCharTokenType(*(it_++))};
}
bool ParseVarNames(Parser& parser, Delimeter delimeter,
SmallVectorImpl<std::string>& var_names) {
auto parse_var_name_fn = [&](Parser& parser) {
std::string var_name;
if (!parser.ParseVarName(&var_name)) {
return false;
}
var_names.push_back(var_name);
return true;
};
return parser.ParseCommaSeparatedVarList(delimeter, parse_var_name_fn);
}
bool ParseAffineMapResults(Parser& parser,
SmallVectorImpl<std::string>& affine_expr_strs) {
auto parse_var_name_fn = [&](Parser& parser) {
std::string affine_expr_str;
if (!parser.ParseAffineExprString(&affine_expr_str)) {
return false;
}
affine_expr_strs.push_back(affine_expr_str);
return true;
};
return parser.ParseCommaSeparatedVarList(Delimeter::kParen,
parse_var_name_fn);
}
bool ParseAffineExprsWithMLIR(ArrayRef<std::string> dim_var_names,
ArrayRef<std::string> symbol_var_names,
ArrayRef<std::string> affine_expr_strings,
MLIRContext* context,
SmallVectorImpl<AffineExpr>& affine_exprs) {
std::stringstream ss;
ss << "affine_map<(" << absl::StrJoin(dim_var_names, ", ") << ") ";
if (!symbol_var_names.empty()) {
ss << '[' << absl::StrJoin(symbol_var_names, ", ") << "] ";
}
ss << " -> (" << absl::StrJoin(affine_expr_strings, ", ") << ")>";
auto affine_map_attr = mlir::parseAttribute(ss.str(), context);
if (!affine_map_attr) {
llvm::errs() << "Failed to parse affine map: " << ss.str() << "\n";
return false;
}
AffineMap affine_map = mlir::cast<AffineMapAttr>(affine_map_attr).getValue();
affine_exprs = llvm::to_vector(affine_map.getResults());
return true;
}
std::string GetVarName(int64_t id, std::string_view name,
std::string_view prefix) {
if (!name.empty()) {
return std::string(name);
}
return absl::StrFormat("%s%d", prefix, id);
}
std::string GetDimVarName(int64_t dim_id, std::string_view dim_name = "") {
return GetVarName(dim_id, dim_name, "d");
}
std::string GetRangeVarName(int64_t range_id,
std::string_view range_name = "") {
return GetVarName(range_id, range_name, "s");
}
std::string GetRTVarName(int64_t rt_id, std::string_view rt_name = "") {
return GetVarName(rt_id, rt_name, "rt");
}
std::string GetAffineSymbolName(
int64_t id, absl::Span<const std::string> symbol_names = {}) {
if (id < symbol_names.size()) {
const auto& name = symbol_names[id];
if (!name.empty()) {
return name;
}
}
return absl::StrFormat("%s%d", "s", id);
}
std::string GetAffineDimensionName(
int64_t id, absl::Span<const std::string> dim_names = {}) {
if (id < dim_names.size()) {
const auto& name = dim_names[id];
if (!name.empty()) {
return name;
}
}
return absl::StrFormat("%s%d", "d", id);
}
void PrintAffineExprImpl(const AffineExpr affine_expr,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names,
bool add_parentheses, llvm::raw_ostream& os) {
const char* binopSpelling = nullptr;
switch (affine_expr.getKind()) {
case AffineExprKind::SymbolId: {
unsigned symbol_id =
mlir::cast<AffineSymbolExpr>(affine_expr).getPosition();
os << GetAffineSymbolName(symbol_id, symbol_names);
return;
}
case AffineExprKind::DimId: {
unsigned dim_id = mlir::cast<AffineDimExpr>(affine_expr).getPosition();
os << GetAffineDimensionName(dim_id, dim_names);
return;
}
case AffineExprKind::Constant:
os << mlir::cast<AffineConstantExpr>(affine_expr).getValue();
return;
case AffineExprKind::Add:
binopSpelling = " + ";
break;
case AffineExprKind::Mul:
binopSpelling = " * ";
break;
case AffineExprKind::FloorDiv:
binopSpelling = " floordiv ";
break;
case AffineExprKind::CeilDiv:
binopSpelling = " ceildiv ";
break;
case AffineExprKind::Mod:
binopSpelling = " mod ";
break;
}
auto binOp = mlir::cast<AffineBinaryOpExpr>(affine_expr);
AffineExpr lhsExpr = binOp.getLHS();
AffineExpr rhsExpr = binOp.getRHS();
if (binOp.getKind() != AffineExprKind::Add) {
if (add_parentheses) {
os << '(';
}
auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr);
if (rhsConst && binOp.getKind() == AffineExprKind::Mul &&
rhsConst.getValue() == -1) {
os << "-";
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
true, os);
if (add_parentheses) {
os << ')';
}
return;
}
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
true, os);
os << binopSpelling;
PrintAffineExprImpl(rhsExpr, dim_names, symbol_names,
true, os);
if (add_parentheses) {
os << ')';
}
return;
}
if (add_parentheses) {
os << '(';
}
if (auto rhs = mlir::dyn_cast<AffineBinaryOpExpr>(rhsExpr)) {
if (rhs.getKind() == AffineExprKind::Mul) {
AffineExpr rrhsExpr = rhs.getRHS();
if (auto rrhs = mlir::dyn_cast<AffineConstantExpr>(rrhsExpr)) {
if (rrhs.getValue() == -1) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - ";
if (rhs.getLHS().getKind() == AffineExprKind::Add) {
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
true, os);
} else {
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
false, os);
}
if (add_parentheses) {
os << ')';
}
return;
}
if (rrhs.getValue() < -1) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - ";
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
true, os);
os << " * " << -rrhs.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
}
}
if (auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr)) {
if (rhsConst.getValue() < 0) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - " << -rhsConst.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " + ";
PrintAffineExprImpl(rhsExpr, dim_names, symbol_names,
false, os);
if (add_parentheses) {
os << ')';
}
}
}
std::optional<IndexingMap> ParseIndexingMap(llvm::StringRef input,
MLIRContext* context) {
Parser parser(input);
SmallVector<std::string, 8> dim_var_names;
SmallVector<std::string, 4> symbol_var_names;
if (!ParseVarNames(parser, Delimeter::kParen, dim_var_names) ||
(parser.GetCurrentToken().kind == Token::Kind::kLBracket &&
!ParseVarNames(parser, Delimeter::kBracket, symbol_var_names))) {
llvm::errs() << "Failed to parse variable names\n";
return std::nullopt;
}
SmallVector<std::string, 3> affine_expr_strs;
if (!parser.ConsumeToken(Token::Kind::kArrow) ||
!ParseAffineMapResults(parser, affine_expr_strs)) {
llvm::errs() << "Failed to parse affine map results\n";
return std::nullopt;
}
int num_affine_map_results = affine_expr_strs.size();
if (dim_var_names.empty() && symbol_var_names.empty()) {
if (num_affine_map_results != 0 ||
parser.GetCurrentToken().kind != Token::Kind::kEOF) {
llvm::errs() << "Expected an empty indexing map\n";
return std::nullopt;
}
return IndexingMap{AffineMap::get(context), {},
{}, {}};
}
if (!parser.ConsumeToken(Token::Kind::kComma) ||
!parser.ConsumeToken(Token::Kind::kKeywordDomain) ||
!parser.ConsumeToken(Token::Kind::kColon)) {
llvm::errs() << "Failed to parse domain keyword\n";
return std::nullopt;
}
std::vector<IndexingMap::Variable> dim_vars;
for (const auto& [dim_id, dim_name] : llvm::enumerate(dim_var_names)) {
std::string var_name;
Interval interval;
if (!parser.ParseVarName(&var_name) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse DimVar\n";
return std::nullopt;
}
if (var_name != dim_name) {
llvm::errs() << "Dimension name mismatch\n";
return std::nullopt;
}
if (var_name == GetDimVarName(dim_id)) {
var_name = "";
}
dim_vars.push_back(IndexingMap::Variable{interval, var_name});
}
std::vector<IndexingMap::Variable> range_vars;
for (const auto& [index, range_name] : llvm::enumerate(symbol_var_names)) {
std::string var_name;
Interval interval;
if (!parser.ParseVarName(&var_name) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse RangeVar\n";
return std::nullopt;
}
if (var_name != range_name) {
llvm::errs() << "Symbol name mismatch\n";
return std::nullopt;
}
if (var_name == GetRangeVarName(index)) {
var_name = "";
}
range_vars.push_back(IndexingMap::Variable{interval, var_name});
}
SmallVector<Interval> constraint_bounds;
while (!parser.ConsumeToken(Token::Kind::kEOF)) {
std::string affine_expr_str;
Interval interval;
if (!parser.ParseAffineExprString(&affine_expr_str) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse constraint\n";
return std::nullopt;
}
affine_expr_strs.push_back(affine_expr_str);
constraint_bounds.push_back(interval);
}
SmallVector<AffineExpr> affine_exprs;
if (!ParseAffineExprsWithMLIR(dim_var_names, symbol_var_names,
affine_expr_strs, context, affine_exprs)) {
return std::nullopt;
}
ArrayRef<AffineExpr> affine_map_results =
ArrayRef(affine_exprs).take_front(num_affine_map_results);
ArrayRef<AffineExpr> constraint_exprs =
ArrayRef(affine_exprs).drop_front(num_affine_map_results);
SmallVector<std::pair<AffineExpr, Interval>> constraints;
constraints.reserve(constraint_exprs.size());
for (const auto& [expr, bounds] :
llvm::zip(constraint_exprs, constraint_bounds)) {
constraints.push_back(std::make_pair(expr, bounds));
}
auto map = AffineMap::get(dim_vars.size(), range_vars.size(),
affine_map_results, context);
return IndexingMap{map, std::move(dim_vars), std::move(range_vars),
{}, constraints};
}
std::string ToString(AffineExpr affine_expr,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names) {
std::string s;
llvm::raw_string_ostream ss(s);
PrintAffineExprImpl(affine_expr, dim_names, symbol_names,
false, ss);
return s;
}
std::string ToString(AffineExpr affine_expr) {
return ToString(affine_expr, {}, {});
}
std::ostream& operator<<(std::ostream& out, AffineExpr affine_expr) {
out << ToString(affine_expr);
return out;
}
std::string ToString(AffineMap affine_map,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names) {
CHECK_EQ(dim_names.size(), affine_map.getNumDims());
CHECK_EQ(symbol_names.size(), affine_map.getNumSymbols());
std::string s;
llvm::raw_string_ostream ss(s);
ss << '(' << absl::StrJoin(dim_names, ", ") << ')';
if (affine_map.getNumSymbols() != 0) {
ss << '[' << absl::StrJoin(symbol_names, ", ") << ']';
}
ss << " -> (";
llvm::interleaveComma(affine_map.getResults(), ss, [&](AffineExpr expr) {
PrintAffineExprImpl(expr, dim_names, symbol_names,
false, ss);
});
ss << ')';
return s;
}
std::string ToString(AffineMap affine_map) {
int dim_count = affine_map.getNumDims();
SmallVector<std::string, 3> dim_names;
dim_names.reserve(affine_map.getNumDims());
for (int64_t dim_id = 0; dim_id < dim_count; ++dim_id) {
dim_names.push_back(GetAffineDimensionName(dim_id));
}
int symbol_count = affine_map.getNumSymbols();
SmallVector<std::string, 3> symbol_names;
symbol_names.reserve(affine_map.getNumSymbols());
for (int64_t symbol_id = 0; symbol_id < symbol_count; ++symbol_id) {
symbol_names.push_back(GetAffineSymbolName(symbol_id));
}
return ToString(affine_map, dim_names, symbol_names);
}
std::ostream& operator<<(std::ostream& out, AffineMap affine_map) {
out << ToString(affine_map);
return out;
}
std::string ToString(const IndexingMap& indexing_map,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> range_names,
absl::Span<const std::string> rt_names) {
std::stringstream ss;
if (indexing_map.IsKnownEmpty()) {
ss << "KNOWN EMPTY\n";
return ss.str();
}
const auto& dim_vars = indexing_map.GetDimVars();
CHECK_EQ(dim_names.size(), dim_vars.size());
const auto& range_vars = indexing_map.GetRangeVars();
CHECK_EQ(range_names.size(), range_vars.size());
const auto& rt_vars = indexing_map.GetRTVars();
CHECK_EQ(rt_names.size(), rt_vars.size());
SmallVector<std::string, 3> symbol_names;
symbol_names.reserve(range_names.size() + rt_names.size());
symbol_names.append(range_names.begin(), range_names.end());
symbol_names.append(rt_names.begin(), rt_names.end());
ss << ToString(indexing_map.GetAffineMap(), dim_names, symbol_names);
if (dim_vars.empty() && range_vars.empty() && rt_vars.empty()) {
return ss.str();
}
ss << ", domain: ";
int64_t remaining_vars_to_print =
dim_vars.size() + range_vars.size() + rt_vars.size();
for (const auto& [index, dim_var] : llvm::enumerate(dim_vars)) {
ss << dim_names[index] << " in " << dim_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
for (const auto& [index, range_var] : llvm::enumerate(range_vars)) {
ss << symbol_names[index] << " in " << range_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
for (const auto& [index, rt_var] : llvm::enumerate(rt_vars)) {
ss << rt_names[index] << " in " << rt_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
std::vector<std::string> expr_range_strings;
const auto& constraints = indexing_map.GetConstraints();
expr_range_strings.reserve(constraints.size());
for (const auto& [expr, range] : constraints) {
expr_range_strings.push_back(absl::StrCat(
ToString(expr, dim_names, symbol_names), " in ", range.ToString()));
}
std::sort(expr_range_strings.begin(), expr_range_strings.end());
if (!expr_range_strings.empty()) {
ss << ", " << absl::StrJoin(expr_range_strings, ", ");
}
return ss.str();
}
std::string ToString(const IndexingMap& indexing_map) {
SmallVector<std::string, 3> dim_names;
dim_names.reserve(indexing_map.GetDimensionCount());
for (const auto& [index, dim_var] :
llvm::enumerate(indexing_map.GetDimVars())) {
dim_names.push_back(GetDimVarName(index, dim_var.name));
}
SmallVector<std::string, 3> range_names;
range_names.reserve(indexing_map.GetRangeVarsCount());
for (const auto& [index, range_var] :
llvm::enumerate(indexing_map.GetRangeVars())) {
range_names.push_back(GetRangeVarName(index, range_var.name));
}
SmallVector<std::string, 3> rt_names;
rt_names.reserve(indexing_map.GetRTVarsCount());
for (const auto& [index, rt_var] :
llvm::enumerate(indexing_map.GetRTVars())) {
rt_names.push_back(GetRTVarName(index, rt_var.name));
}
return ToString(indexing_map, dim_names, range_names, rt_names);
}
std::ostream& operator<<(std::ostream& out, const IndexingMap& indexing_map) {
out << ToString(indexing_map);
return out;
}
}
} | #include "xla/service/gpu/model/indexing_map_serialization.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::HasSubstr;
class IndexingMapSerializationTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
void ParseAndCheck(absl::string_view indexing_map_str) {
auto indexing_map = ParseIndexingMap(indexing_map_str, &mlir_context_);
ASSERT_TRUE(indexing_map.has_value());
EXPECT_THAT(ToString(*indexing_map), MatchIndexingString(indexing_map_str));
}
};
TEST_F(IndexingMapSerializationTest, EmptyMap) { ParseAndCheck("() -> ()"); }
TEST_F(IndexingMapSerializationTest, DimsOnly) {
ParseAndCheck(R"(
(d0, d1) -> (d0 mod 2 + d1),
domain:
d0 in [0, 3],
d1 in [-4, 4]
)");
}
TEST_F(IndexingMapSerializationTest, SymbolsOnly) {
ParseAndCheck(R"(
()[s0, s1] -> (s0 floordiv s1),
domain:
s0 in [0, 3],
s1 in [0, 4]
)");
}
TEST_F(IndexingMapSerializationTest, DimsAndSymbolsNoConstraints) {
ParseAndCheck(R"(
(d0, d1)[s0, s1, s2] -> (s2, d0 + d1, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 4],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3]
)");
}
TEST_F(IndexingMapSerializationTest, DimsAndSymbolsAndConstraints) {
ParseAndCheck(R"(
(d0, d1)[s0, s1, s2] -> (s2, d0 + d1, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 4],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3],
d0 mod 4 in [0, 0],
d1 + s0 in [0, 45]
)");
}
TEST_F(IndexingMapSerializationTest, AffineExprsWithParens) {
ParseAndCheck(R"(
(d0, d1)[s0, s1] -> ((d0 + d0 mod 3) floordiv 3
+ s0 + (s0 * 2) mod 3 + (d0 + s0) mod 3),
domain:
d0 in [0, 9],
d1 in [0, 19],
s0 in [0, 29],
s1 in [0, 39]
)");
}
TEST_F(IndexingMapSerializationTest, CustomNames) {
ParseAndCheck(R"(
(th_x, bl_x)[s0, vector_elem, s2] -> (s2, th_x + bl_x, vector_elem, s0),
domain:
th_x in [0, 3],
bl_x in [0, 4],
s0 in [0, 1],
vector_elem in [0, 1],
s2 in [0, 3],
bl_x + s0 in [0, 45],
th_x mod 4 in [0, 0]
)");
}
TEST_F(IndexingMapSerializationTest, AffineMapPrinterTest) {
mlir::AffineExpr d0, d1, s0, s1;
mlir::bindDims(&mlir_context_, d0, d1);
mlir::bindSymbols(&mlir_context_, s0, s1);
auto map = mlir::AffineMap::get(2, 2, {d0 + d1.floorDiv(8), s0 + s1 % 16},
&mlir_context_);
EXPECT_THAT(ToString(map, {"offset", "d1"}, {"s0", "linear_index"}),
HasSubstr("(offset, d1)[s0, linear_index] -> "
"(offset + d1 floordiv 8, s0 + linear_index mod 16)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_serialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_serialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7a7d7c3-ab9c-464d-ab87-e8ba974ca712 | cpp | tensorflow/tensorflow | indexing_analysis | third_party/xla/xla/service/gpu/model/indexing_analysis.cc | third_party/xla/xla/service/gpu/model/indexing_analysis_test.cc | #include "xla/service/gpu/model/indexing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineExpr;
using mlir::AffineMap;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::getAffineSymbolExpr;
using mlir::MLIRContext;
HloInstructionIndexing CreateUnknownIndexing(int64_t count = 1) {
HloInstructionIndexing indexing;
indexing.indexing_maps = std::vector<absl::flat_hash_set<IndexingMap>>(
count, {IndexingMap::GetUndefined()});
return indexing;
}
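// A runtime variable backed by an HLO instruction: the interval of values it
// may take, the instruction producing it, and the affine map from the output
// index space to the element of `hlo` that supplies the value.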
struct HLORTVar {
Interval feasible_values;
const HloInstruction* hlo;
mlir::AffineMap map;
};
bool operator==(const HLORTVar& lhs, const HLORTVar& rhs) {
return lhs.feasible_values == rhs.feasible_values && lhs.hlo == rhs.hlo &&
lhs.map == rhs.map;
}
inline bool operator!=(const HLORTVar& lhs, const HLORTVar& rhs) {
return !(lhs == rhs);
}
struct RTVarOptimizationResult {
AffineExpr remapped_symbol;
HLORTVar rt_var;
};
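// Folds as much of the runtime variable as possible into the affine expression
// for symbol `symbol_index`: constants, iota along the accessed dimension,
// pure indexing ops (bitcast/broadcast/reshape/reverse/slice/transpose),
// negation, and add/sub/mul/div where one operand does not depend on the
// runtime value. Returns the remapped symbol expression together with the
// (possibly rewritten) runtime variable.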
RTVarOptimizationResult OptimizeRTVar(HLORTVar rt_var, int64_t symbol_index,
MLIRContext* mlir_context) {
const auto symbol = getAffineSymbolExpr(symbol_index, mlir_context);
auto result_expr = symbol;
while (true) {
if (auto constant_expr = DynCast<HloConstantInstruction>(rt_var.hlo)) {
if (rt_var.map.isConstant()) {
const auto idx = rt_var.map.getConstantResults();
result_expr = result_expr.replace(
symbol, getAffineConstantExpr(
constant_expr->literal().GetIntegralAsS64(idx).value(),
mlir_context));
}
return {result_expr, rt_var};
}
if (auto iota_expr = DynCast<HloIotaInstruction>(rt_var.hlo)) {
auto iota_dimension = iota_expr->iota_dimension();
CHECK(iota_dimension < rt_var.map.getNumResults());
return {
result_expr.replace(symbol, rt_var.map.getResults()[iota_dimension]),
rt_var};
}
auto is_indexing_transformation = [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kReverse ||
instr->opcode() == HloOpcode::kSlice ||
instr->opcode() == HloOpcode::kTranspose;
};
if (is_indexing_transformation(rt_var.hlo)) {
auto instr_indexing_map =
*ComputeOutputToInputIndexing(rt_var.hlo, 0, mlir_context)
.indexing_maps[0]
.begin();
rt_var.hlo = rt_var.hlo->operand(0);
rt_var.map = instr_indexing_map.GetAffineMap().compose(rt_var.map);
continue;
}
if (rt_var.hlo->opcode() == HloOpcode::kNegate) {
rt_var.hlo = rt_var.hlo->operand(0);
result_expr = result_expr.replace(symbol, -symbol);
continue;
}
if (rt_var.hlo->opcode() == HloOpcode::kAdd ||
rt_var.hlo->opcode() == HloOpcode::kSubtract ||
rt_var.hlo->opcode() == HloOpcode::kMultiply ||
rt_var.hlo->opcode() == HloOpcode::kDivide) {
const auto apply_op = [&](const AffineExpr& lhs,
const AffineExpr& rhs) -> AffineExpr {
switch (rt_var.hlo->opcode()) {
case HloOpcode::kAdd:
return lhs + rhs;
case HloOpcode::kSubtract:
return lhs - rhs;
case HloOpcode::kMultiply:
return lhs * rhs;
case HloOpcode::kDivide:
return lhs.floorDiv(rhs);
default:
ABSL_UNREACHABLE();
}
};
auto lhs = OptimizeRTVar(
HLORTVar{rt_var.feasible_values, rt_var.hlo->operand(0), rt_var.map},
symbol_index, mlir_context);
if (!lhs.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
result_expr =
result_expr.replace(symbol, apply_op(lhs.remapped_symbol, symbol));
rt_var.hlo = rt_var.hlo->operand(1);
continue;
}
auto rhs = OptimizeRTVar(
HLORTVar{rt_var.feasible_values, rt_var.hlo->operand(1), rt_var.map},
symbol_index, mlir_context);
if (!rhs.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
result_expr =
result_expr.replace(symbol, apply_op(symbol, rhs.remapped_symbol));
result_expr = result_expr.replace(symbol, lhs.remapped_symbol);
rt_var = lhs.rt_var;
continue;
}
}
return {result_expr, rt_var};
}
}
std::vector<IndexingMap::Variable> ConvertHLORTVarsToRTVars(
const std::vector<HLORTVar>& hlo_rt_vars) {
std::vector<IndexingMap::Variable> rt_vars;
rt_vars.reserve(hlo_rt_vars.size());
for (const HLORTVar& hlo_rt_var : hlo_rt_vars) {
rt_vars.push_back(IndexingMap::Variable{hlo_rt_var.feasible_values});
}
return rt_vars;
}
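// Runs OptimizeRTVar on every runtime variable, substitutes the simplified
// expressions into `affine_map`, and constructs the IndexingMap with the
// remaining runtime variables attached.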
IndexingMap FoldRTVarsAndConstructIndexingMap(
AffineMap affine_map, std::vector<IndexingMap::Variable> dim_vars,
std::vector<HLORTVar> hlo_rt_vars) {
if (hlo_rt_vars.empty()) {
return IndexingMap(affine_map, std::move(dim_vars), {},
ConvertHLORTVarsToRTVars(hlo_rt_vars));
}
auto* ctx = affine_map.getContext();
for (auto symbol_index = 0; symbol_index < hlo_rt_vars.size();
++symbol_index) {
auto& rt_var = hlo_rt_vars[symbol_index];
auto rt_var_symbol = getAffineSymbolExpr(symbol_index, ctx);
RTVarOptimizationResult result = OptimizeRTVar(rt_var, symbol_index, ctx);
if (result.remapped_symbol != rt_var_symbol) {
affine_map = affine_map.replace({{rt_var_symbol, result.remapped_symbol}},
affine_map.getNumDims(),
affine_map.getNumSymbols());
llvm::DenseMap<AffineExpr, AffineExpr> replacements;
}
if (result.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
if (rt_var != result.rt_var) {
rt_var = std::move(result.rt_var);
}
}
}
return IndexingMap(affine_map, std::move(dim_vars), {},
ConvertHLORTVarsToRTVars(hlo_rt_vars));
}
HloInstructionIndexing ComputeOutputToInputCwiseOpIndexing(
const HloInstruction* instr, MLIRContext* mlir_context) {
IndexingMap identity_map = CreateIdentityMap(instr->shape(), mlir_context);
IndexingMap unit_map(
mlir::AffineMap::get(identity_map.GetAffineMap().getNumDims(),
0, mlir_context),
identity_map.GetDimVars(), {}, {});
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(instr->operand_count());
int64_t operand_count = instr->operand_count();
for (int64_t operand_id = 0; operand_id < operand_count; ++operand_id) {
auto* operand = instr->operand(operand_id);
if (operand->shape().rank() == 0 && instr->shape().rank() > 0) {
instr_indexing.indexing_maps[operand_id].insert(unit_map);
} else {
instr_indexing.indexing_maps[operand_id].insert(identity_map);
}
}
return instr_indexing;
}
HloInstructionIndexing ComputeInputToOutputCwiseOpIndexing(
const HloInstruction* instr, MLIRContext* mlir_context) {
IndexingMap identity_map = CreateIdentityMap(instr->shape(), mlir_context);
return HloInstructionIndexing::FromIndexingMaps({identity_map});
}
HloInstructionIndexing ComputeOutputToInputBroadcastOpIndexing(
const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
auto output_dims = bcast->shape().dimensions();
std::vector<AffineExpr> exprs;
exprs.reserve(bcast->dimensions().size());
for (int64_t bcast_dim : bcast->dimensions()) {
exprs.push_back(getAffineDimExpr(bcast_dim, mlir_context));
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_dims.size(), 0, exprs,
mlir_context),
output_dims, {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeInputToOutputBroadcastOpIndexing(
const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
absl::Span<const int64_t> bcast_dims = bcast->dimensions();
const Shape& input_shape = bcast->operand(0)->shape();
const Shape& output_shape = bcast->shape();
std::vector<int64_t> added_dims_sizes;
std::vector<AffineExpr> exprs;
exprs.reserve(output_shape.rank());
for (auto [output_dim_id, output_dim] :
llvm::enumerate(output_shape.dimensions())) {
auto bcast_dim =
std::find(bcast_dims.begin(), bcast_dims.end(), output_dim_id);
if (bcast_dim == bcast_dims.end()) {
exprs.push_back(
getAffineSymbolExpr(added_dims_sizes.size(), mlir_context));
added_dims_sizes.push_back(output_dim);
continue;
}
exprs.push_back(getAffineDimExpr(
std::distance(bcast_dims.begin(), bcast_dim), mlir_context));
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(input_shape.rank(), added_dims_sizes.size(), exprs,
mlir_context),
input_shape.dimensions(), added_dims_sizes);
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
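// Each concat operand receives the identity map with the concatenation
// dimension shifted by the running offset and its domain restricted to the
// slice of the output that the operand occupies.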
HloInstructionIndexing ComputeOutputToInputConcatenateOpIndexing(
const HloConcatenateInstruction* concat, MLIRContext* mlir_context) {
const auto& operand_0_dims = concat->operand(0)->shape().dimensions();
mlir::MutableAffineMap affine_map =
AffineMap::getMultiDimIdentityMap(operand_0_dims.size(), mlir_context);
std::vector<IndexingMap::Variable> dim_vars =
DimVarsFromTensorSizes(operand_0_dims);
HloInstructionIndexing concat_indexing;
concat_indexing.indexing_maps.resize(concat->operand_count());
int64_t concat_dim = concat->concatenate_dimension();
AffineExpr concat_dim_expr = getAffineDimExpr(concat_dim, mlir_context);
int64_t offset = 0;
for (const auto [operand_id, operand] : llvm::enumerate(concat->operands())) {
affine_map.setResult(concat_dim, concat_dim_expr - offset);
int64_t operand_concat_dim = operand->shape().dimensions()[concat_dim];
dim_vars[concat_dim] =
IndexingMap::Variable{{offset, offset + operand_concat_dim - 1}};
concat_indexing.indexing_maps[operand_id].insert(
IndexingMap(affine_map.getAffineMap(), dim_vars,
{}, {}));
offset += operand_concat_dim;
}
return concat_indexing;
}
HloInstructionIndexing ComputeInputToOutputConcatenateOpIndexing(
const HloConcatenateInstruction* concat, int input_id,
MLIRContext* mlir_context) {
int64_t concat_dim = concat->concatenate_dimension();
int64_t offset = 0;
for (int64_t operand_id = 0; operand_id < input_id; ++operand_id) {
offset += concat->operand(operand_id)->shape().dimensions()[concat_dim];
}
const auto& operand_dims = concat->operand(input_id)->shape().dimensions();
mlir::MutableAffineMap affine_map =
AffineMap::getMultiDimIdentityMap(operand_dims.size(), mlir_context);
affine_map.setResult(concat_dim,
getAffineDimExpr(concat_dim, mlir_context) + offset);
IndexingMap indexing_map =
IndexingMap::FromTensorSizes(affine_map.getAffineMap(), operand_dims, {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeOutputToInputFusionOpIndexing(
const HloFusionInstruction* fusion, int output_id,
MLIRContext* mlir_context) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
auto grouped_indexing_maps = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[output_id], mlir_context);
HloInstructionIndexing fusion_indexing;
fusion_indexing.indexing_maps.resize(fusion->operand_count());
for (auto [operand_id, operand] : llvm::enumerate(fusion->operands())) {
fusion_indexing.indexing_maps[operand_id] = grouped_indexing_maps[operand];
}
return fusion_indexing;
}
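// Batch and non-contracting dimensions map straight to output dimensions;
// every pair of contracting dimensions becomes a shared symbol ranging over
// the contracted extent.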
HloInstructionIndexing ComputeOutputToInputDotOpIndexing(
const HloDotInstruction* dot, MLIRContext* mlir_context) {
CHECK_NE(dot, nullptr);
const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
absl::Span<const int64_t> lhs_contracting_dims(
dim_numbers.lhs_contracting_dimensions());
absl::Span<const int64_t> rhs_contracting_dims =
dim_numbers.rhs_contracting_dimensions();
absl::Span<const int64_t> lhs_batch_dims = dim_numbers.lhs_batch_dimensions();
absl::Span<const int64_t> rhs_batch_dims = dim_numbers.rhs_batch_dimensions();
const Shape& lhs_shape = dot->operand(0)->shape();
const Shape& rhs_shape = dot->operand(1)->shape();
SmallVector<AffineExpr> lhs_exprs(lhs_shape.rank());
SmallVector<AffineExpr> rhs_exprs(rhs_shape.rank());
int64_t output_dim_id = 0;
for (auto [lhs_batch_dim, rhs_batch_dim] :
llvm::zip(lhs_batch_dims, rhs_batch_dims)) {
AffineExpr output_dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
lhs_exprs[lhs_batch_dim] = output_dim_expr;
rhs_exprs[rhs_batch_dim] = output_dim_expr;
++output_dim_id;
}
auto lhs_non_contracting_dims =
GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_contracting_dims);
assert(lhs_non_contracting_dims.ok());
for (int64_t lhs_non_contracting_dim : lhs_non_contracting_dims.value()) {
lhs_exprs[lhs_non_contracting_dim] =
getAffineDimExpr(output_dim_id++, mlir_context);
}
auto rhs_non_contracting_dims =
GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_contracting_dims);
assert(rhs_non_contracting_dims.ok());
for (int64_t rhs_non_contracting_dim : rhs_non_contracting_dims.value()) {
rhs_exprs[rhs_non_contracting_dim] =
getAffineDimExpr(output_dim_id++, mlir_context);
}
int64_t input_dim_id = 0;
std::vector<int64_t> input_dim_sizes;
input_dim_sizes.reserve(lhs_contracting_dims.size());
for (auto [lhs_contracting_dim, rhs_contracting_dim] :
llvm::zip(lhs_contracting_dims, rhs_contracting_dims)) {
AffineExpr input_dim_expr = getAffineSymbolExpr(input_dim_id, mlir_context);
lhs_exprs[lhs_contracting_dim] = input_dim_expr;
rhs_exprs[rhs_contracting_dim] = input_dim_expr;
++input_dim_id;
input_dim_sizes.push_back(lhs_shape.dimensions(lhs_contracting_dim));
}
IndexingMap lhs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), lhs_exprs,
mlir_context),
dot->shape().dimensions(), input_dim_sizes);
IndexingMap rhs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), rhs_exprs,
mlir_context),
dot->shape().dimensions(), input_dim_sizes);
return HloInstructionIndexing::FromIndexingMaps(
{lhs_indexing_map, rhs_indexing_map});
}
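// The sliced operand is read at `output_index + start`, where each start
// index is a runtime variable clamped to [0, dim_size - slice_size]; the
// scalar start-index operands themselves get an empty (rank-0) result map.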
HloInstructionIndexing ComputeOutputToInputDynamicSliceOpIndexing(
const HloDynamicSliceInstruction* dynamic_slice,
MLIRContext* mlir_context) {
const Shape& input_shape = dynamic_slice->operand(0)->shape();
const Shape& output_shape = dynamic_slice->shape();
int64_t rank = output_shape.rank();
const int64_t first_index_num = dynamic_slice->first_index_operand_number();
CHECK(dynamic_slice->operand(first_index_num)->shape().rank() == 0)
<< "b/118437727: Old form, not supported.";
AffineMap empty_results_affine_map = AffineMap::get(
rank, 0, {}, mlir_context);
IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
empty_results_affine_map, output_shape.dimensions(), {});
std::vector<HLORTVar> offsets_rt_vars;
offsets_rt_vars.reserve(rank);
std::vector<AffineExpr> exprs;
exprs.reserve(rank);
for (auto [dim, slice_size] :
llvm::enumerate(dynamic_slice->dynamic_slice_sizes())) {
exprs.push_back(getAffineDimExpr(dim, mlir_context) +
getAffineSymbolExpr(dim, mlir_context));
offsets_rt_vars.push_back(
HLORTVar{Interval{0, input_shape.dimensions(dim) - slice_size},
dynamic_slice->operand(dim + first_index_num),
empty_results_affine_map});
}
std::vector<IndexingMap> indexing_maps(dynamic_slice->operand_count(),
start_indices_map);
indexing_maps.front() = FoldRTVarsAndConstructIndexingMap(
AffineMap::get(rank, rank, exprs,
mlir_context),
start_indices_map.GetDimVars(), std::move(offsets_rt_vars));
return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
HloInstructionIndexing ComputeOutputToInputDynamicUpdateSliceOpIndexing(
const HloDynamicUpdateSliceInstruction* dus, MLIRContext* mlir_context) {
const Shape& update_shape = dus->update()->shape();
const Shape& output_shape = dus->shape();
int64_t rank = output_shape.rank();
std::vector<AffineExpr> identity;
identity.reserve(rank);
for (int64_t dim = 0; dim < rank; ++dim) {
identity.push_back(getAffineDimExpr(dim, mlir_context));
}
IndexingMap operand_map = IndexingMap::FromTensorSizes(
AffineMap::get(rank, 0, identity,
mlir_context),
output_shape.dimensions(), {});
AffineMap empty_results_affine_map = AffineMap::get(
rank, 0, {}, mlir_context);
IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
empty_results_affine_map, output_shape.dimensions(), {});
std::vector<AffineExpr> exprs;
exprs.reserve(rank);
std::vector<HLORTVar> rt_vars;
rt_vars.reserve(rank);
for (auto [dim, slice_size] : llvm::enumerate(update_shape.dimensions())) {
exprs.push_back(getAffineDimExpr(dim, mlir_context) -
getAffineSymbolExpr(dim, mlir_context));
Interval feasible_values{0, output_shape.dimensions(dim) - slice_size};
rt_vars.push_back(HLORTVar{feasible_values, dus->operand(2 + dim),
empty_results_affine_map});
}
IndexingMap update_map = FoldRTVarsAndConstructIndexingMap(
AffineMap::get(rank, rank,
exprs, mlir_context),
operand_map.GetDimVars(), std::move(rt_vars));
std::vector<IndexingMap> indexing_maps(dus->operand_count(),
start_indices_map);
indexing_maps[0] = std::move(operand_map);
indexing_maps[1] = std::move(update_map);
return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
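// Only gathers canonicalized by GatherSimplifier are supported: the indices
// operand is read at (d0, s0) with s0 ranging over the index vector, and the
// gathered operand is read at the offset dimensions plus runtime start
// indices taken from the indices tensor.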
HloInstructionIndexing ComputeOutputToInputGatherOpIndexing(
const HloGatherInstruction* gather, MLIRContext* mlir_context) {
CHECK(GatherSimplifier::IsSimplifiedGather(gather))
<< "Non-simplified HLO Gather is not supported.";
const Shape& operand_shape = gather->operand(0)->shape();
const Shape& indices_shape = gather->operand(1)->shape();
const GatherDimensionNumbers& dimension_numbers =
gather->gather_dimension_numbers();
int64_t index_vector_length =
indices_shape.dimensions(dimension_numbers.index_vector_dim());
const Shape& output_shape = gather->shape();
int64_t output_rank = output_shape.rank();
AffineExpr indices_id_dim = getAffineDimExpr(0, mlir_context);
std::vector<IndexingMap::Variable> dim_vars =
DimVarsFromTensorSizes(output_shape.dimensions());
IndexingMap indices_map{
AffineMap::get(output_rank, 1,
{indices_id_dim, getAffineSymbolExpr(0, mlir_context)},
mlir_context),
dim_vars,
{IndexingMap::Variable{{0, index_vector_length - 1}}},
{}};
std::vector<HLORTVar> rt_vars;
std::vector<AffineExpr> exprs;
exprs.reserve(operand_shape.rank());
for (auto [operand_dim_id, slice_size] :
llvm::enumerate(gather->gather_slice_sizes())) {
int64_t output_dim_id = dimension_numbers.offset_dims(operand_dim_id);
exprs.push_back(getAffineDimExpr(output_dim_id, mlir_context));
if (operand_dim_id >= index_vector_length) continue;
rt_vars.push_back(HLORTVar{
Interval{0, operand_shape.dimensions(operand_dim_id) - slice_size},
gather->operand(1),
AffineMap::get(output_rank, 0,
{indices_id_dim,
getAffineConstantExpr(operand_dim_id, mlir_context)},
mlir_context)});
exprs.back() =
exprs.back() + getAffineSymbolExpr(operand_dim_id, mlir_context);
}
IndexingMap operand_map = FoldRTVarsAndConstructIndexingMap(
AffineMap::get(output_rank,
index_vector_length, exprs, mlir_context),
std::move(dim_vars), std::move(rt_vars));
return HloInstructionIndexing::FromIndexingMaps({operand_map, indices_map});
}
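// Inverts the padding: each output dimension is restricted to the non-padded
// region, the low padding is subtracted, and interior padding divides the
// index by (interior + 1) with the remainder constrained to zero.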
IndexingMap ComputeOutputToInputPadOpIndexingImpl(
absl::Span<const int64_t> output_dims,
absl::Span<const int64_t> padding_low,
absl::Span<const int64_t> padding_high,
absl::Span<const int64_t> padding_interior, MLIRContext* mlir_context) {
int64_t output_rank = output_dims.size();
std::vector<AffineExpr> exprs;
std::vector<std::pair<AffineExpr, Interval>> constraints;
std::vector<IndexingMap::Variable> dim_vars;
exprs.reserve(output_rank);
constraints.reserve(output_rank);
int64_t output_dim_id = 0;
for (const auto [output_dim, pad_low, pad_high, pad_interior] :
llvm::zip(output_dims, padding_low, padding_high, padding_interior)) {
AffineExpr dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
dim_vars.push_back({IndexingMap::Variable{
std::max(int64_t{0}, pad_low),
std::min(output_dim - 1, output_dim - 1 - pad_high)}});
if (pad_interior == 0) {
exprs.push_back(dim_expr - pad_low);
} else {
exprs.push_back((dim_expr - pad_low).floorDiv(pad_interior + 1));
constraints.push_back(
{(dim_expr - pad_low) % (pad_interior + 1), Interval{0, 0}});
}
++output_dim_id;
}
return IndexingMap{
AffineMap::get(output_rank, 0, exprs, mlir_context),
std::move(dim_vars),
{},
{}, absl::MakeSpan(constraints)};
}
HloInstructionIndexing ComputeOutputToInputPadOpIndexing(
const HloPadInstruction* pad, MLIRContext* mlir_context) {
const Shape& output_shape = pad->shape();
int64_t rank = output_shape.rank();
SmallVector<int64_t> padding_low, padding_high, padding_interior;
padding_low.reserve(rank);
padding_high.reserve(rank);
padding_interior.reserve(rank);
for (const auto& dim_config : pad->padding_config().dimensions()) {
padding_low.push_back(dim_config.edge_padding_low());
padding_high.push_back(dim_config.edge_padding_high());
padding_interior.push_back(dim_config.interior_padding());
}
IndexingMap input_indexing_map = ComputeOutputToInputPadOpIndexingImpl(
output_shape.dimensions(), padding_low, padding_high, padding_interior,
mlir_context);
IndexingMap padding_value_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
output_shape.dimensions(), {});
return HloInstructionIndexing::FromIndexingMaps(
{input_indexing_map, padding_value_indexing_map});
}
HloInstructionIndexing ComputeOutputToInputReduceOpIndexing(
const HloReduceInstruction* reduce, int output_id,
MLIRContext* mlir_context) {
absl::flat_hash_set<int64_t> reduce_dims_ids(reduce->dimensions().begin(),
reduce->dimensions().end());
const Shape& input_shape = reduce->operand(output_id)->shape();
const Shape& output_shape = GetOutputShape(reduce, 0);
std::vector<int64_t> parallel_dims_sizes;
int64_t output_dim_id = 0;
std::vector<AffineExpr> exprs;
exprs.reserve(input_shape.rank());
for (auto [input_dim_id, input_dim] :
llvm::enumerate(input_shape.dimensions())) {
if (reduce_dims_ids.contains(input_dim_id)) {
exprs.push_back(
getAffineSymbolExpr(parallel_dims_sizes.size(), mlir_context));
parallel_dims_sizes.push_back(input_dim);
continue;
}
exprs.push_back(getAffineDimExpr(output_dim_id++, mlir_context));
}
IndexingMap inputs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), reduce_dims_ids.size(), exprs,
mlir_context),
output_shape.dimensions(), parallel_dims_sizes);
IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
output_shape.dimensions(), {});
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(reduce->operand_count());
for (int64_t id = 0; id < reduce->input_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inputs_indexing_map);
}
for (int64_t id = reduce->input_count(); id < reduce->operand_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inits_indexing_map);
}
return instr_indexing;
}
HloInstructionIndexing ComputeInputToOutputReduceOpIndexing(
const HloReduceInstruction* reduce, int input_id,
MLIRContext* mlir_context) {
const Shape& output_shape = GetOutputShape(reduce, 0);
int64_t output_rank = output_shape.rank();
HloInstructionIndexing instr_indexing;
int arity = reduce->input_count();
instr_indexing.indexing_maps.resize(arity);
if (input_id >= arity) {
std::vector<AffineExpr> inits_exprs;
inits_exprs.reserve(output_rank);
for (int sym = 0; sym < output_rank; ++sym) {
inits_exprs.push_back(getAffineSymbolExpr(sym, mlir_context));
}
IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(0, output_rank, inits_exprs,
mlir_context),
{}, output_shape.dimensions());
for (int64_t id = 0; id < arity; ++id) {
instr_indexing.indexing_maps[id].insert(inits_indexing_map);
}
return instr_indexing;
}
const Shape& input_shape = reduce->operand(input_id)->shape();
std::vector<AffineExpr> inputs_exprs;
inputs_exprs.reserve(output_rank);
for (auto [input_dim_id, input_dim] :
llvm::enumerate(input_shape.dimensions())) {
if (!absl::c_linear_search(reduce->dimensions(), input_dim_id)) {
inputs_exprs.push_back(getAffineDimExpr(input_dim_id, mlir_context));
}
}
IndexingMap inputs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(input_shape.rank(), 0, inputs_exprs,
mlir_context),
input_shape.dimensions(), {});
for (int64_t id = 0; id < arity; ++id) {
instr_indexing.indexing_maps[id].insert(inputs_indexing_map);
}
return instr_indexing;
}
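// Maps (output index, window index) to the element read by a windowed op:
// applies stride and window dilation, then composes with a pad indexing map
// so that low/high padding and base dilation turn into constraints on the
// unpadded input index.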
IndexingMap ComposeIndexingMapsForWindow(
absl::Span<const int64_t> input_dimensions,
absl::Span<const int64_t> output_dimensions, const Window& window,
MLIRContext* mlir_context) {
size_t rank = input_dimensions.size();
SmallVector<int64_t> padding_low, padding_high, padding_interior,
padded_input_dimensions;
padding_low.reserve(rank);
padding_high.reserve(rank);
padding_interior.reserve(rank);
padded_input_dimensions.reserve(rank);
SmallVector<AffineExpr, 4> exprs;
std::vector<IndexingMap::Variable> dim_vars;
std::vector<IndexingMap::Variable> range_vars;
exprs.reserve(rank);
dim_vars.reserve(rank);
range_vars.reserve(rank);
for (const auto& [dim_id, window_config] :
llvm::enumerate(window.dimensions())) {
padding_low.push_back(window_config.padding_low());
padding_high.push_back(window_config.padding_high());
padding_interior.push_back(window_config.base_dilation() - 1);
padded_input_dimensions.push_back(
input_dimensions[dim_id] + window_config.padding_low() +
window_config.padding_high() +
(input_dimensions[dim_id] - 1) * (window_config.base_dilation() - 1));
AffineExpr dim_expr = getAffineDimExpr(dim_id, mlir_context);
AffineExpr symbol_expr = getAffineSymbolExpr(dim_id, mlir_context);
exprs.push_back(symbol_expr * window_config.window_dilation() +
window_config.stride() * dim_expr);
dim_vars.push_back(
{IndexingMap::Variable{0, output_dimensions[dim_id] - 1}});
range_vars.push_back({IndexingMap::Variable{0, window_config.size() - 1}});
}
IndexingMap padded_input_indexing = ComputeOutputToInputPadOpIndexingImpl(
padded_input_dimensions, padding_low, padding_high, padding_interior,
mlir_context);
IndexingMap input_indexing_no_padding(
AffineMap::get(rank, rank, exprs, mlir_context), dim_vars, range_vars,
{});
IndexingMap result =
ComposeIndexingMaps(input_indexing_no_padding, padded_input_indexing);
result.Simplify();
result.RemoveUnusedSymbols();
return result;
}
HloInstructionIndexing ComputeOutputToInputReduceWindowOpIndexing(
const HloReduceWindowInstruction* reduce_window, int output_id,
MLIRContext* mlir_context) {
const Shape& input_shape = reduce_window->operand(0)->shape();
const Shape& output_shape = GetOutputShape(reduce_window, 0);
IndexingMap inputs_indexing = ComposeIndexingMapsForWindow(
input_shape.dimensions(), output_shape.dimensions(),
reduce_window->window(), mlir_context);
IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
output_shape.dimensions(), {});
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(reduce_window->operand_count());
for (int64_t id = 0; id < reduce_window->input_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inputs_indexing);
}
for (int64_t id = reduce_window->input_count();
id < reduce_window->operand_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inits_indexing_map);
}
return instr_indexing;
}
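// Convolution indexing: spatial dimensions reuse ComposeIndexingMapsForWindow,
// kernel spatial dimensions and the input feature dimension become symbols,
// and feature/batch group counts offset the input feature and batch
// coordinates respectively.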
HloInstructionIndexing ComputeOutputToInputConvolutionOpIndexing(
const HloConvolutionInstruction* convolution, MLIRContext* mlir_context) {
const Shape& input_shape = convolution->operand(0)->shape();
const Shape& kernel_shape = convolution->operand(1)->shape();
const Shape& output_shape = convolution->shape();
const ConvolutionDimensionNumbers& dnums =
convolution->convolution_dimension_numbers();
size_t rank = output_shape.rank();
size_t spatial_rank = rank - 2;
std::vector<int64_t> input_spatial_sizes(spatial_rank);
std::vector<int64_t> kernel_spatial_sizes(spatial_rank);
std::vector<int64_t> output_spatial_sizes(spatial_rank);
for (int i = 0; i < spatial_rank; ++i) {
input_spatial_sizes[i] =
input_shape.dimensions(dnums.input_spatial_dimensions(i));
kernel_spatial_sizes[i] =
kernel_shape.dimensions(dnums.kernel_spatial_dimensions(i));
output_spatial_sizes[i] =
output_shape.dimensions(dnums.output_spatial_dimensions(i));
}
IndexingMap input_spatial_indexing =
ComposeIndexingMapsForWindow(input_spatial_sizes, output_spatial_sizes,
convolution->window(), mlir_context);
std::vector<AffineExpr> replacement_dims(spatial_rank);
for (int i = 0; i < spatial_rank; ++i) {
replacement_dims[i] =
getAffineDimExpr(dnums.output_spatial_dimensions(i), mlir_context);
}
std::vector<AffineExpr> input_exprs(rank);
for (int i = 0; i < spatial_rank; ++i) {
input_exprs[dnums.input_spatial_dimensions(i)] =
input_spatial_indexing.GetAffineMap().getResult(i).replaceDims(
replacement_dims);
}
llvm::DenseMap<AffineExpr, Interval> input_constraints;
for (const auto& [key, val] : input_spatial_indexing.GetConstraints()) {
input_constraints[key.replaceDims(replacement_dims)] = val;
}
std::vector<AffineExpr> kernel_exprs(rank);
for (int i = 0; i < spatial_rank; ++i) {
kernel_exprs[dnums.kernel_spatial_dimensions(i)] =
getAffineSymbolExpr(i, mlir_context);
}
AffineExpr dim_expr =
getAffineDimExpr(dnums.output_feature_dimension(), mlir_context);
kernel_exprs[dnums.kernel_output_feature_dimension()] = dim_expr;
std::vector<IndexingMap::Variable> input_symbols =
input_spatial_indexing.GetRangeVars();
std::vector<IndexingMap::Variable> kernel_symbols =
RangeVarsFromTensorSizes(kernel_spatial_sizes);
input_exprs[dnums.input_feature_dimension()] =
getAffineSymbolExpr(input_symbols.size(), mlir_context);
kernel_exprs[dnums.kernel_input_feature_dimension()] =
getAffineSymbolExpr(kernel_symbols.size(), mlir_context);
int64_t input_group_size =
kernel_shape.dimensions(dnums.kernel_input_feature_dimension());
Interval input_feature_range{0, input_group_size - 1};
input_symbols.push_back(IndexingMap::Variable{input_feature_range});
kernel_symbols.push_back(IndexingMap::Variable{input_feature_range});
if (convolution->feature_group_count() > 1) {
AffineExpr& input_feature = input_exprs[dnums.input_feature_dimension()];
int64_t output_group_size =
output_shape.dimensions(dnums.output_feature_dimension());
int64_t feature_group_size =
output_group_size / convolution->feature_group_count();
input_feature = dim_expr.floorDiv(feature_group_size) * input_group_size +
input_feature;
}
AffineExpr batch_dim_expr =
getAffineDimExpr(dnums.output_batch_dimension(), mlir_context);
if (convolution->batch_group_count() > 1) {
int64_t batch_group_size =
output_shape.dimensions(dnums.output_batch_dimension());
AffineExpr batch_group_expr =
getAffineSymbolExpr(input_symbols.size(), mlir_context);
input_symbols.push_back(
IndexingMap::Variable{{0, convolution->batch_group_count() - 1}});
input_exprs[dnums.input_batch_dimension()] =
batch_group_expr * batch_group_size + batch_dim_expr;
} else {
input_exprs[dnums.input_batch_dimension()] = batch_dim_expr;
}
IndexingMap inputs_indexing(
AffineMap::get(rank, input_symbols.size(), input_exprs, mlir_context),
DimVarsFromTensorSizes(output_shape.dimensions()), input_symbols,
{}, input_constraints);
IndexingMap kernel_indexing(
AffineMap::get(rank, kernel_symbols.size(), kernel_exprs, mlir_context),
DimVarsFromTensorSizes(output_shape.dimensions()), kernel_symbols,
{});
return HloInstructionIndexing::FromIndexingMaps(
{inputs_indexing, kernel_indexing});
}
std::vector<int64_t> ComputeStrides(absl::Span<const int64_t> dims) {
int rank = static_cast<int>(dims.size());
std::vector<int64_t> strides(rank, 1);
for (int i = rank - 2; i >= 0; --i) {
strides[i] = dims[i + 1] * strides[i + 1];
}
return strides;
}
}
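// Linearizes a multi-dimensional index into a single row-major expression
// using the strides of `dims`.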
AffineExpr LinearizeShape(absl::Span<const int64_t> dims,
absl::Span<const AffineExpr> dimension_exprs,
MLIRContext* mlir_context) {
AffineExpr linear_index = getAffineConstantExpr(0, mlir_context);
auto strides = ComputeStrides(dims);
for (auto [stride, dimension_expr] : llvm::zip(strides, dimension_exprs)) {
linear_index = linear_index + dimension_expr * stride;
}
return linear_index;
}
std::vector<AffineExpr> DelinearizeIndex(absl::Span<const int64_t> dims,
AffineExpr linear_index,
MLIRContext* mlir_context) {
std::vector<AffineExpr> multi_index;
multi_index.reserve(dims.size());
AffineExpr remainder = linear_index;
for (int64_t stride : ComputeStrides(dims)) {
multi_index.push_back(remainder.floorDiv(stride));
remainder = remainder % stride;
}
return multi_index;
}
namespace {
void ComputeMinimalReshapeIndexing(
absl::Span<const int64_t> input_dims, absl::Span<const int64_t> output_dims,
absl::Span<const AffineExpr> output_dims_exprs,
std::vector<AffineExpr>* exprs, MLIRContext* mlir_context) {
if (input_dims.size() == 1 && output_dims.size() == 1) {
absl::c_copy(output_dims_exprs, std::back_inserter(*exprs));
return;
}
if (input_dims.size() == 1) {
exprs->push_back(
LinearizeShape(output_dims, output_dims_exprs, mlir_context));
return;
}
if (output_dims.size() == 1) {
auto multi_index =
DelinearizeIndex(input_dims, output_dims_exprs.front(), mlir_context);
absl::c_copy(multi_index, std::back_inserter(*exprs));
return;
}
AffineExpr linear_index =
LinearizeShape(output_dims, output_dims_exprs, mlir_context);
auto multi_index = DelinearizeIndex(input_dims, linear_index, mlir_context);
absl::c_copy(multi_index, std::back_inserter(*exprs));
}
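// Returns a map from indices of `output` to the corresponding indices of
// `input`, built by splitting the reshape into minimal groups of input/output
// dimensions with equal element counts and emitting a linearize/delinearize
// pair per group; zero-element shapes map to index 0.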
AffineMap ComputeReshapeIndexingMap(const Shape& input, const Shape& output,
MLIRContext* mlir_context) {
absl::Span<const int64_t> input_dims = input.dimensions();
absl::Span<const int64_t> output_dims = output.dimensions();
std::vector<AffineExpr> exprs;
exprs.reserve(input.rank());
if (ShapeUtil::ElementsIn(input) == 0) {
for (int i = 0; i < input.rank(); ++i) {
exprs.push_back(getAffineConstantExpr(0, mlir_context));
}
return AffineMap::get(output_dims.size(), 0, exprs,
mlir_context);
}
std::vector<AffineExpr> output_dims_exprs;
int64_t input_num_elements = 1;
int64_t output_num_elements = 1;
std::vector<int64_t> input_subshape, output_subshape;
size_t input_dim_id = 0, output_dim_id = 0;
while (input_dim_id < input.rank() || output_dim_id < output.rank() ||
!input_subshape.empty()) {
if (input_dim_id < input.rank() &&
(input_subshape.empty() || input_num_elements < output_num_elements ||
input_dims[input_dim_id] == 1)) {
input_num_elements *= input_dims[input_dim_id];
input_subshape.push_back(input_dims[input_dim_id]);
++input_dim_id;
continue;
}
if (output_dim_id < output.rank() &&
(output_subshape.empty() || output_num_elements < input_num_elements ||
output_dims[output_dim_id] == 1)) {
output_num_elements *= output_dims[output_dim_id];
output_subshape.push_back(output_dims[output_dim_id]);
output_dims_exprs.push_back(
getAffineDimExpr(output_dim_id, mlir_context));
++output_dim_id;
continue;
}
ComputeMinimalReshapeIndexing(input_subshape, output_subshape,
output_dims_exprs, &exprs, mlir_context);
input_num_elements = 1;
output_num_elements = 1;
input_subshape.clear();
output_subshape.clear();
output_dims_exprs.clear();
}
return AffineMap::get(output_dims.size(), 0, exprs,
mlir_context);
}
HloInstructionIndexing ComputeOutputToInputReshapeOpIndexing(
const HloReshapeInstruction* reshape, MLIRContext* mlir_context) {
const auto& input = reshape->operand(0)->shape();
const auto& output = reshape->shape();
IndexingMap reshape_indexing_map = IndexingMap::FromTensorSizes(
ComputeReshapeIndexingMap(input, output, mlir_context),
output.dimensions(), {});
reshape_indexing_map.Simplify();
return HloInstructionIndexing::FromIndexingMaps({reshape_indexing_map});
}
HloInstructionIndexing ComputeInputToOutputReshapeOpIndexing(
const HloReshapeInstruction* reshape, MLIRContext* mlir_context) {
const auto& input = reshape->operand(0)->shape();
const auto& output = reshape->shape();
IndexingMap reshape_indexing_map = IndexingMap::FromTensorSizes(
ComputeReshapeIndexingMap(output, input, mlir_context),
input.dimensions(), {});
reshape_indexing_map.Simplify();
return HloInstructionIndexing::FromIndexingMaps({reshape_indexing_map});
}
HloInstructionIndexing ComputeReverseOpIndexing(
const HloReverseInstruction* reverse, MLIRContext* mlir_context) {
absl::flat_hash_set<int64_t> reverse_dims(reverse->dimensions().begin(),
reverse->dimensions().end());
auto output_dims = reverse->shape().dimensions();
std::vector<AffineExpr> exprs;
exprs.reserve(output_dims.size());
for (auto [output_dim_id, output_dim] : llvm::enumerate(output_dims)) {
auto dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
if (!reverse_dims.contains(output_dim_id)) {
exprs.push_back(dim_expr);
continue;
}
exprs.push_back(-dim_expr + output_dim - 1);
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_dims.size(), 0, exprs,
mlir_context),
output_dims, {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeOutputToInputSliceOpIndexing(
const HloSliceInstruction* slice, MLIRContext* mlir_context) {
auto output_rank = slice->shape().rank();
std::vector<AffineExpr> exprs;
exprs.reserve(output_rank);
for (int64_t dim = 0; dim < output_rank; ++dim) {
AffineExpr dim_expr = getAffineDimExpr(dim, mlir_context);
exprs.push_back(dim_expr * slice->slice_strides()[dim] +
slice->slice_starts()[dim]);
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_rank, 0, exprs, mlir_context),
slice->shape().dimensions(), {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeInputToOutputSliceOpIndexing(
const HloSliceInstruction* slice, MLIRContext* mlir_context) {
auto output_rank = slice->shape().rank();
std::vector<AffineExpr> exprs;
exprs.reserve(output_rank);
for (int64_t dim = 0; dim < output_rank; ++dim) {
AffineExpr dim_expr = getAffineDimExpr(dim, mlir_context);
exprs.push_back((dim_expr - slice->slice_starts()[dim])
.floorDiv(slice->slice_strides()[dim]));
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_rank, 0, exprs, mlir_context),
slice->operand(0)->shape().dimensions(), {});
for (int64_t dim = 0; dim < output_rank; ++dim) {
AffineExpr dim_expr = getAffineDimExpr(dim, mlir_context);
int64_t lb = slice->slice_starts()[dim];
int64_t ub =
(slice->shape().dimensions(dim) - 1) * slice->slice_strides()[dim] +
slice->slice_starts()[dim];
indexing_map.AddConstraint(dim_expr, {lb, ub});
indexing_map.AddConstraint((dim_expr - lb) % slice->slice_strides()[dim],
{0, 0});
}
return HloInstructionIndexing::FromIndexingMaps({std::move(indexing_map)});
}
AffineMap ComputeTransposeIndexingMap(absl::Span<const int64_t> permutation,
MLIRContext* mlir_context) {
return AffineMap::getPermutationMap(
std::vector<unsigned>(permutation.begin(), permutation.end()),
mlir_context);
}
HloInstructionIndexing ComputeOutputToInputTransposeOpIndexing(
const HloTransposeInstruction* transpose, MLIRContext* mlir_context) {
AffineMap inverse_permutation = ComputeTransposeIndexingMap(
InversePermutation(transpose->dimensions()), mlir_context);
return HloInstructionIndexing::FromIndexingMaps({IndexingMap::FromTensorSizes(
inverse_permutation, transpose->shape().dimensions(), {})});
}
HloInstructionIndexing ComputeInputToOutputTransposeOpIndexing(
const HloTransposeInstruction* transpose, MLIRContext* mlir_context) {
AffineMap forward_permutation =
ComputeTransposeIndexingMap(transpose->dimensions(), mlir_context);
return HloInstructionIndexing::FromIndexingMaps({IndexingMap::FromTensorSizes(
forward_permutation, transpose->operand(0)->shape().dimensions(), {})});
}
}
IndexingMap GetBitcastMap(absl::Span<const int64_t> input_shape,
const Shape& output_shape,
mlir::MLIRContext* mlir_context) {
return GetBitcastMap(ShapeUtil::MakeShapeWithDescendingLayout(
output_shape.element_type(), input_shape),
output_shape, mlir_context);
}
IndexingMap GetBitcastMap(absl::Span<const int64_t> input_shape,
absl::Span<const int64_t> output_shape,
mlir::MLIRContext* mlir_context) {
return GetBitcastMap(
ShapeUtil::MakeShapeWithDescendingLayout(PrimitiveType::S8, input_shape),
ShapeUtil::MakeShapeWithDescendingLayout(PrimitiveType::S8, output_shape),
mlir_context);
}
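// Decomposes the bitcast (via ShapeUtil::DecomposeBitcast) into a transpose,
// a reshape, or a transpose-reshape-transpose and composes the corresponding
// maps. The result takes an index of `input_shape` to the index of
// `output_shape` holding the same element.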
IndexingMap GetBitcastMap(const Shape& input_shape, const Shape& output_shape,
MLIRContext* mlir_context) {
ShapeUtil::BitcastDecomposition decomposed_bitcast =
ShapeUtil::DecomposeBitcast(input_shape, output_shape);
if (std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
decomposed_bitcast)) {
auto permutation = ShapeUtil::DeduceTransposeDimensionsForBitcast(
input_shape, output_shape);
CHECK(permutation.has_value())
<< "Failed to deduce permutation for a bitcast.";
return IndexingMap::FromTensorSizes(
ComputeTransposeIndexingMap(permutation.value(), mlir_context),
input_shape.dimensions(), {});
}
if (std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposed_bitcast)) {
return IndexingMap::FromTensorSizes(
ComputeReshapeIndexingMap(output_shape, input_shape, mlir_context),
input_shape.dimensions(), {});
}
auto trt = std::get<ShapeUtil::BitcastDecompositionTrt>(decomposed_bitcast);
auto transpose_map_1 =
ComputeTransposeIndexingMap(trt.transpose1_dims, mlir_context);
auto reshape_map = ComputeReshapeIndexingMap(
trt.reshape_shape, trt.transpose1_shape, mlir_context);
auto transpose_map_2 =
ComputeTransposeIndexingMap(trt.transpose2_dims, mlir_context);
auto bitcast_map =
transpose_map_2.compose(reshape_map).compose(transpose_map_1);
return IndexingMap::FromTensorSizes(bitcast_map, input_shape.dimensions(),
{});
}
namespace {
HloInstructionIndexing ComputeOutputToInputBitcastOpIndexing(
const HloInstruction* bitcast, MLIRContext* mlir_context) {
auto bitcast_map = GetBitcastMap(bitcast->shape(),
bitcast->operand(0)->shape(), mlir_context);
bitcast_map.Simplify();
return HloInstructionIndexing::FromIndexingMaps({bitcast_map});
}
HloInstructionIndexing ComputeInputToOutputBitcastOpIndexing(
const HloInstruction* bitcast, MLIRContext* mlir_context) {
auto bitcast_map = GetBitcastMap(bitcast->operand(0)->shape(),
bitcast->shape(), mlir_context);
bitcast_map.Simplify();
return HloInstructionIndexing::FromIndexingMaps({bitcast_map});
}
std::vector<int64_t> ToTransposeDimensions(const Layout& l) {
std::vector<int64_t> out(l.minor_to_major().begin(),
l.minor_to_major().end());
absl::c_reverse(out);
return out;
}
}
IndexingMap CreateIdentityMap(absl::Span<const int64_t> dimensions,
mlir::MLIRContext* mlir_context) {
return IndexingMap::FromTensorSizes(
AffineMap::getMultiDimIdentityMap(dimensions.size(), mlir_context),
dimensions, {});
}
IndexingMap CreateIdentityMap(const Shape& shape, MLIRContext* mlir_context) {
if (shape.IsTuple()) {
return CreateIdentityMap(shape.tuple_shapes(0), mlir_context);
}
return CreateIdentityMap(shape.dimensions(), mlir_context);
}
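// Delinearizes `linear` into per-dimension indices like DelinearizeIndex, but
// assumes the index is in bounds: the major-most non-unit dimension drops the
// modulo, and zero-sized shapes produce all-zero indices.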
llvm::SmallVector<AffineExpr, 4> DelinearizeInBoundsIndex(
AffineExpr linear, absl::Span<const int64_t> sizes) {
llvm::SmallVector<AffineExpr, 4> result;
result.reserve(sizes.size());
if (absl::c_linear_search(sizes, 0)) {
for (int dim = 0; dim < sizes.size(); ++dim) {
result.push_back(mlir::getAffineConstantExpr(0, linear.getContext()));
}
return result;
}
auto strides = ComputeStrides(sizes);
for (auto [size, stride] : llvm::zip(sizes, strides)) {
result.push_back(linear.floorDiv(stride) % size);
}
for (int dim = 0; dim < sizes.size(); ++dim) {
if (sizes[dim] > 1) {
result[dim] = linear.floorDiv(strides[dim]);
break;
}
}
return result;
}
IndexingMap GetIndexingMapFromPhysicalLayoutToLogical(
const Shape& shape, MLIRContext* mlir_context) {
if (shape.rank() == 0) {
return IndexingMap(AffineMap::get(mlir_context),
{}, {}, {});
}
return IndexingMap::FromTensorSizes(
ComputeTransposeIndexingMap(
InversePermutation(ToTransposeDimensions(shape.layout())),
mlir_context),
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape)
.dimensions(),
{});
}
IndexingMap GetIndexingMapFromLogicalToPhysicalLayout(
const Shape& shape, MLIRContext* mlir_context) {
if (shape.rank() == 0) {
return IndexingMap(AffineMap::get(mlir_context),
{}, {}, {});
}
return IndexingMap::FromTensorSizes(
ComputeTransposeIndexingMap(ToTransposeDimensions(shape.layout()),
mlir_context),
shape.dimensions(), {});
}
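// Simplifies every indexing map of every operand in place, replacing each
// simplified map in the set and pruning its unused symbols; undefined maps
// are kept as-is.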
bool HloInstructionIndexing::Simplify() {
bool any_simplified = false;
for (auto& operand_indexing : indexing_maps) {
std::vector<IndexingMap> to_remove, to_add;
for (IndexingMap map : operand_indexing) {
to_remove.push_back(map);
if (map.IsUndefined()) {
to_add.push_back(map);
      } else if (map.Simplify()) {
        map.RemoveUnusedSymbols();
        to_add.push_back(map);
} else {
to_remove.pop_back();
}
}
for (auto& map : to_remove) {
operand_indexing.erase(map);
}
for (auto& map : to_add) {
operand_indexing.insert(map);
}
any_simplified |= !to_remove.empty();
}
return any_simplified;
}
HloInstructionIndexing HloInstructionIndexing::FromIndexingMaps(
absl::Span<const IndexingMap> indexing_maps) {
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(indexing_maps.size());
for (const auto& [index, map] : llvm::enumerate(indexing_maps)) {
instr_indexing.indexing_maps[index].insert(map);
}
return instr_indexing;
}
std::string HloInstructionIndexing::ToString() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
std::ostream& operator<<(std::ostream& out,
const HloInstructionIndexing& instr_indexing) {
for (const auto& [operand_id, indexing_maps] :
llvm::enumerate(instr_indexing.indexing_maps)) {
out << "operand id = " << operand_id << ' ';
for (const auto& indexing_map : indexing_maps) {
if (indexing_map.IsUndefined()) {
out << "unknown indexing";
continue;
}
out << indexing_map;
}
}
return out;
}
const Shape& GetOutputShape(const HloInstruction* instr, int64_t output_id) {
return instr->shape().IsTuple()
? ShapeUtil::GetSubshape(instr->shape(), {output_id})
: instr->shape();
}
GroupedByOpIndexingMap GroupIndexingMapsByProducers(
const HloInstructionIndexing& indexing, const HloInstruction* instr) {
GroupedByOpIndexingMap result;
for (const auto& [operand_id, indexing_maps] :
llvm::enumerate(indexing.indexing_maps)) {
result[instr->operand(operand_id)].insert(indexing_maps.begin(),
indexing_maps.end());
}
return result;
}
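// Starting from the identity map on `target_instr`, walks the fusion in
// reverse post order and composes output-to-input maps of the visited
// instructions, grouping the results by producer. If the target is a fusion
// parameter, the identity map is attributed to the corresponding fusion
// operand.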
GroupedByOpIndexingMap ComputeGroupedOutputToInputIndexing(
const HloFusionAdaptor& fusion_adaptor, HloInstructionAdaptor target_instr,
MLIRContext* ctx) {
auto initial_map = CreateIdentityMap(target_instr.instruction().shape(), ctx);
GroupedByOpIndexingMap grouped_indexing_maps;
if (fusion_adaptor.ContainsInstruction(target_instr)) {
if (auto parameter_instr =
DynCast<HloParameterInstruction>(&target_instr.instruction())) {
auto fusion_instr = parameter_instr->parent()->FusionInstruction();
auto fusion_operand =
fusion_instr->operand(parameter_instr->parameter_number());
grouped_indexing_maps[fusion_operand] = {initial_map};
return grouped_indexing_maps;
}
}
grouped_indexing_maps[&target_instr.instruction()].insert(initial_map);
auto post_order = fusion_adaptor.MakeInstructionPostOrder();
auto it = std::find(post_order.rbegin(), post_order.rend(), target_instr);
for (; it != post_order.rend(); ++it) {
auto producer_indexing = ComputeOutputToInputIndexing(&it->instruction(),
0, ctx);
auto consumer_indexing_maps =
grouped_indexing_maps.find(&it->instruction());
if (consumer_indexing_maps == grouped_indexing_maps.end()) {
continue;
}
IndexingMapSet consumer_indexing_maps_copy = consumer_indexing_maps->second;
for (const auto& [producer_operand_id, producer_operand_indexing] :
llvm::enumerate(producer_indexing.indexing_maps)) {
auto producer_operand_adaptor = it->GetOperand(producer_operand_id);
for (const IndexingMap& producer_map : producer_operand_indexing) {
for (const IndexingMap& consumer_map : consumer_indexing_maps_copy) {
auto composed_map = ComposeIndexingMaps(consumer_map, producer_map);
composed_map.Simplify();
composed_map.RemoveUnusedSymbols();
grouped_indexing_maps[&producer_operand_adaptor.instruction()].insert(
composed_map);
}
}
}
}
return grouped_indexing_maps;
}
bool FuseProducerConsumerOutputToInputIndexing(
const HloInstruction* producer_instr,
absl::flat_hash_map<const HloInstruction*, IndexingMapSet>*
consumer_indexing,
MLIRContext* mlir_context) {
auto producer_indexing = ComputeOutputToInputIndexing(
producer_instr, 0, mlir_context);
auto consumer_indexing_maps = (*consumer_indexing)[producer_instr];
for (const auto& [producer_operand_id, producer_operand_indexing] :
llvm::enumerate(producer_indexing.indexing_maps)) {
const HloInstruction* producer_operand_instr =
producer_instr->operand(producer_operand_id);
for (const IndexingMap& producer_map : producer_operand_indexing) {
for (const IndexingMap& consumer_map : consumer_indexing_maps) {
(*consumer_indexing)[producer_operand_instr].insert(
ComposeIndexingMaps(producer_map, consumer_map));
}
}
}
consumer_indexing->erase(producer_instr);
return true;
}
HloInstructionIndexing ComputeOutputToInputIndexing(const HloInstruction* instr,
int output_id,
MLIRContext* ctx) {
if (HloInstruction::IsOpElementwise(instr->opcode()) ||
instr->opcode() == HloOpcode::kMap) {
return ComputeOutputToInputCwiseOpIndexing(instr, ctx);
}
if (instr->opcode() == HloOpcode::kBitcast) {
return ComputeOutputToInputBitcastOpIndexing(instr, ctx);
}
if (auto broadcast = DynCast<HloBroadcastInstruction>(instr)) {
return ComputeOutputToInputBroadcastOpIndexing(broadcast, ctx);
}
if (auto concat = DynCast<HloConcatenateInstruction>(instr)) {
return ComputeOutputToInputConcatenateOpIndexing(concat, ctx);
}
if (auto constant = DynCast<HloConstantInstruction>(instr)) {
return HloInstructionIndexing{};
}
if (auto dot = DynCast<HloDotInstruction>(instr)) {
return ComputeOutputToInputDotOpIndexing(dot, ctx);
}
if (auto dynamic_slice = DynCast<HloDynamicSliceInstruction>(instr)) {
return ComputeOutputToInputDynamicSliceOpIndexing(dynamic_slice, ctx);
}
if (auto dus = DynCast<HloDynamicUpdateSliceInstruction>(instr)) {
return ComputeOutputToInputDynamicUpdateSliceOpIndexing(dus, ctx);
}
if (auto fusion = DynCast<HloFusionInstruction>(instr)) {
return ComputeOutputToInputFusionOpIndexing(fusion, output_id, ctx);
}
if (auto gather = DynCast<HloGatherInstruction>(instr)) {
return ComputeOutputToInputGatherOpIndexing(gather, ctx);
}
if (auto iota = DynCast<HloIotaInstruction>(instr)) {
return HloInstructionIndexing{};
}
if (auto pad = DynCast<HloPadInstruction>(instr)) {
return ComputeOutputToInputPadOpIndexing(pad, ctx);
}
if (auto reduce = DynCast<HloReduceInstruction>(instr)) {
return ComputeOutputToInputReduceOpIndexing(reduce, output_id, ctx);
}
if (auto reduce_window = DynCast<HloReduceWindowInstruction>(instr)) {
return ComputeOutputToInputReduceWindowOpIndexing(reduce_window, output_id,
ctx);
}
if (auto convolution = DynCast<HloConvolutionInstruction>(instr)) {
return ComputeOutputToInputConvolutionOpIndexing(convolution, ctx);
}
if (auto reshape = DynCast<HloReshapeInstruction>(instr)) {
return ComputeOutputToInputReshapeOpIndexing(reshape, ctx);
}
if (auto reverse = DynCast<HloReverseInstruction>(instr)) {
return ComputeReverseOpIndexing(reverse, ctx);
}
if (auto slice = DynCast<HloSliceInstruction>(instr)) {
return ComputeOutputToInputSliceOpIndexing(slice, ctx);
}
if (auto transpose = DynCast<HloTransposeInstruction>(instr)) {
return ComputeOutputToInputTransposeOpIndexing(transpose, ctx);
}
return CreateUnknownIndexing(instr->operand_count());
}
HloInstructionIndexing ComputeInputToOutputIndexing(const HloInstruction* instr,
int input_id,
MLIRContext* ctx) {
if (HloInstruction::IsOpElementwise(instr->opcode()) ||
instr->opcode() == HloOpcode::kMap) {
return ComputeInputToOutputCwiseOpIndexing(instr, ctx);
}
if (instr->opcode() == HloOpcode::kBitcast) {
return ComputeInputToOutputBitcastOpIndexing(instr, ctx);
}
if (auto broadcast = DynCast<HloBroadcastInstruction>(instr)) {
return ComputeInputToOutputBroadcastOpIndexing(broadcast, ctx);
}
if (auto concat = DynCast<HloConcatenateInstruction>(instr)) {
return ComputeInputToOutputConcatenateOpIndexing(concat, input_id, ctx);
}
if (auto reduce = DynCast<HloReduceInstruction>(instr)) {
return ComputeInputToOutputReduceOpIndexing(reduce, input_id, ctx);
}
if (auto reshape = DynCast<HloReshapeInstruction>(instr)) {
return ComputeInputToOutputReshapeOpIndexing(reshape, ctx);
}
if (auto reverse = DynCast<HloReverseInstruction>(instr)) {
return ComputeReverseOpIndexing(reverse, ctx);
}
if (auto transpose = DynCast<HloTransposeInstruction>(instr)) {
return ComputeInputToOutputTransposeOpIndexing(transpose, ctx);
}
if (auto slice = DynCast<HloSliceInstruction>(instr)) {
return ComputeInputToOutputSliceOpIndexing(slice, ctx);
}
if (instr->opcode() == HloOpcode::kTuple) {
return HloInstructionIndexing::FromIndexingMaps(
{CreateIdentityMap(instr->shape().tuple_shapes(input_id), ctx)});
}
int64_t num_results =
instr->shape().IsTuple() ? instr->shape().tuple_shapes_size() : 1;
return CreateUnknownIndexing(num_results);
}
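// Composes the input-to-output indexing maps along the use chain from
// `epilogue_parent` to `epilogue_root`, giving the map from an element of the
// parent into the root's index space.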
IndexingMap ComputeEpilogueInputToOutputIndexing(
HloInstructionAdaptor epilogue_parent, HloInstructionAdaptor epilogue_root,
MLIRContext* mlir_context) {
auto chain = HloFindUseChain(epilogue_parent, epilogue_root);
CHECK(!chain.empty()) << "There is no use chain from parent to root";
auto root_indexing = CreateIdentityMap(epilogue_parent.shape(), mlir_context);
for (int i = 1; i < chain.size(); ++i) {
const auto& producer = chain[i - 1].instruction();
const auto& user = chain[i].instruction();
auto user_indexing = ComputeInputToOutputIndexing(
&user, user.operand_index(&producer), mlir_context);
root_indexing = root_indexing * *user_indexing.indexing_maps[0].begin();
root_indexing.Simplify();
root_indexing.RemoveUnusedSymbols();
}
return root_indexing;
}
}
} | #include "xla/service/gpu/model/indexing_analysis.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
MATCHER_P2(MatchInstrIndexing, operand_id, indexing_map_matchers, "") {
return ExplainMatchResult(Eq(operand_id), arg.operand_id, result_listener) &&
ExplainMatchResult(indexing_map_matchers, arg.indexing_maps,
result_listener);
}
using IndexingAnalysisTest = IndexingTestBase;
TEST_F(IndexingAnalysisTest, FuseProducerConsumerOutputToInputIndexing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
)");
const HloInstruction* parameter = root->operand(0);
const HloInstruction* transpose = root->operand(1);
auto root_indexing = GetOutputToInputIndexing(root);
auto grouped_by_key = GroupIndexingMapsByProducers(root_indexing, root);
EXPECT_THAT(
grouped_by_key,
UnorderedElementsAre(Pair(parameter, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"))),
Pair(transpose, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)")))));
}
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
)");
const HloInstruction* parameter = root->operand(0);
const HloInstruction* transpose = root->operand(1);
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(transpose, root);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
EXPECT_THAT(grouped_indexing,
UnorderedElementsAre(
Pair(root, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"))),
Pair(transpose, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"))),
Pair(parameter, UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"),
MatchIndexingMap(R"(
(d0, d1) -> (d1, d0),
domain:
d0 in [0, 999],
d1 in [0, 999]
)")))));
}
TEST_F(IndexingAnalysisTest,
ComputeGroupedOutputToInputIndexing_VariadicReduce) {
auto root = ParseAndGetRoot(R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0})
reduce(param_0.3, param_1.3, param_2.2, constant),
dimensions={1}, to_apply=add
}
)");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
EXPECT_THAT(grouped_indexing,
UnorderedElementsAre(
Pair(root, ElementsAre(MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 31]
)"))),
Pair(root->operand(0), ElementsAre(MatchIndexingMap(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 31],
s0 in [0, 39]
)"))),
Pair(root->operand(1), ElementsAre(MatchIndexingMap(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 31],
s0 in [0, 39]
)"))),
Pair(root->operand(2), ElementsAre(MatchIndexingMap(R"(
(d0) -> (),
domain:
d0 in [0, 31]
)"))),
Pair(root->operand(3), ElementsAre(MatchIndexingMap(R"(
(d0) -> (),
domain:
d0 in [0, 31]
)")))));
}
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing_SingleOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
p1 = f32[1000, 1000] parameter(1)
exp0 = f32[1000, 1000] exponential(p1)
ROOT a0 = f32[1000, 1000] add(p0, exp0)
}
)");
HloComputation* entry_computation = root->parent();
const HloInstruction* exponential =
entry_computation->GetInstructionWithName("exp0");
const HloInstruction* parameter =
entry_computation->GetInstructionWithName("p1");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(exponential);
HloInstructionAdaptor parameter_adaptor =
fusion_adaptor->GetRoots()[0].GetOperand(0);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, parameter_adaptor, &mlir_context_);
EXPECT_THAT(grouped_indexing, UnorderedElementsAre(Pair(
parameter, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)")))));
}
TEST_F(IndexingAnalysisTest,
ComputeGroupedOutputToInputIndexing_StartNotAtRoot) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] parameter(1)
p0_bcast = f32[15, 32, 20, 64] broadcast(p0), dimensions={0, 2}
ROOT reduce_2 = f32[15, 64] reduce(p0_bcast, p0_init),
dimensions={1, 2}, to_apply=max
}
ENTRY e {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[15, 64] fusion(p0, p0_init), kind=kLoop, calls=f
}
)");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto root_adaptor = fusion_adaptor->GetRoots()[0];
auto bcast = root_adaptor.GetOperand(0);
auto parameter_0 = bcast.GetOperand(0);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, bcast, &mlir_context_);
EXPECT_THAT(
grouped_indexing,
UnorderedElementsAre(
Pair(&bcast.instruction(), ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3) -> (d0, d1, d2, d3),
domain:
d0 in [0, 14],
d1 in [0, 31],
d2 in [0, 19],
d3 in [0, 63]
)"))),
Pair(¶meter_0.instruction(), ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3) -> (d0, d2),
domain:
d0 in [0, 14],
d1 in [0, 31],
d2 in [0, 19],
d3 in [0, 63]
)")))));
}
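// The trailing `true` argument asks for indexing with respect to the physical
// (layout-permuted) shape rather than the logical one.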
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestOutputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30] parameter(0)
ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0,
true);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d1, d2, d0),
domain:
d0 in [0, 29],
d1 in [0, 9],
d2 in [0, 19]
)"));
auto output_indexing = GetInputToOutputIndexing(root, 0,
true);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d2, d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19],
d2 in [0, 29]
)"));
}
TEST_F(IndexingAnalysisTest, CopyNothing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[0, 0]{0,1} parameter(0)
ROOT copy0 = f32[0, 0]{1,0} copy(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0);
input_indexing.Simplify();
EXPECT_THAT(input_indexing.ToString(),
MatchIndexingString("operand id = 0 KNOWN EMPTY"));
auto output_indexing = GetInputToOutputIndexing(root, 0);
output_indexing.Simplify();
EXPECT_THAT(output_indexing.ToString(),
MatchIndexingString("operand id = 0 KNOWN EMPTY"));
}
TEST_F(IndexingAnalysisTest, ReshapeNothing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,0,0] parameter(0)
ROOT reshape = f32[0] reshape(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0);
input_indexing.Simplify();
EXPECT_THAT(input_indexing.ToString(),
MatchIndexingString("operand id = 0 KNOWN EMPTY"));
auto output_indexing = GetInputToOutputIndexing(root, 0);
output_indexing.Simplify();
EXPECT_THAT(output_indexing.ToString(),
MatchIndexingString("operand id = 0 KNOWN EMPTY"));
EXPECT_EQ(
output_indexing.indexing_maps[0].begin()->GetAffineMap().getNumResults(),
1);
}
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
ROOT add0 = f32[10, 20, 30] exponential(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0,
true);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d2, d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19],
d2 in [0, 29]
)"));
auto output_indexing = GetInputToOutputIndexing(root, 0,
true);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d1, d2, d0),
domain:
d0 in [0, 29],
d1 in [0, 9],
d2 in [0, 19]
)"));
}
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputAndOutputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0,
true);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 29],
d1 in [0, 9],
d2 in [0, 19]
)"));
auto output_indexing = GetInputToOutputIndexing(root, 0,
true);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 29],
d1 in [0, 9],
d2 in [0, 19]
)"));
}
TEST_F(IndexingAnalysisTest, ElementwiseOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20] parameter(0)
p1 = f32[10, 20] parameter(1)
ROOT add0 = f32[10, 20] add(p0, p1)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
operand id = 1
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
}
TEST_F(IndexingAnalysisTest, Map) {
auto root = ParseAndGetRoot(R"(
HloModule m
mapper {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY e {
p0 = f32[10, 20] parameter(0)
p1 = f32[10, 20] parameter(1)
ROOT add0 = f32[10, 20] map(%p0, %p1), dimensions={}, to_apply=mapper
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
operand id = 1
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 19]
)"));
}
TEST_F(IndexingAnalysisTest, BitcastIsReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 32] parameter(0)
ROOT bitcast = f32[4, 8, 4] bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 * 4 + d2),
domain:
d0 in [0, 3],
d1 in [0, 7],
d2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, BitcastIsTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT bitcast = f32[3, 6, 128, 12288] {2, 1, 3, 0} bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d3, d1, d2),
domain:
d0 in [0, 2],
d1 in [0, 5],
d2 in [0, 127],
d3 in [0, 12287]
)"));
}
TEST_F(IndexingAnalysisTest, BitcastIsTransposeReshapeTranspose) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[16, 17, 3] parameter(0)
ROOT bitcast = f32[51, 16] {0, 1} bitcast(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d1, d0 floordiv 3, d0 mod 3),
domain:
d0 in [0, 50],
d1 in [0, 15]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d1 * 3 + d2, d0),
domain:
d0 in [0, 15],
d1 in [0, 16],
d2 in [0, 2]
)"));
}
TEST_F(IndexingAnalysisTest, BroadcastOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[20] parameter(0)
ROOT bc0 = f32[10, 20, 30] broadcast(p0), dimensions={1}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d1),
domain:
d0 in [0, 9],
d1 in [0, 19],
d2 in [0, 29]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0, s1] -> (s0, d0, s1),
domain:
d0 in [0, 19],
s0 in [0, 9],
s1 in [0, 29]
)"));
}
TEST_F(IndexingAnalysisTest, ConstantOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
ROOT c1 = bf16[17, 22] constant(1)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), IsEmpty());
}
TEST_F(IndexingAnalysisTest, ConcatenateOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 1],
d1 in [0, 4],
d2 in [0, 6]
operand id = 1
(d0, d1, d2) -> (d0, d1 - 5, d2),
domain:
d0 in [0, 1],
d1 in [5, 15],
d2 in [0, 6]
operand id = 2
(d0, d1, d2) -> (d0, d1 - 16, d2),
domain:
d0 in [0, 1],
d1 in [16, 32],
d2 in [0, 6]
)"));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 1],
d1 in [0, 4],
d2 in [0, 6]
)"));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 + 5, d2),
domain:
d0 in [0, 1],
d1 in [0, 10],
d2 in [0, 6]
)"));
auto output_indexing_2 = GetInputToOutputIndexing(root, 2);
EXPECT_THAT(output_indexing_2.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 + 16, d2),
domain:
d0 in [0, 1],
d1 in [0, 16],
d2 in [0, 6]
)"));
}
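// The rt* symbols below stand for the runtime dynamic-slice offsets; their
// ranges are clamped so that the requested slice stays within the operand.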
TEST_F(IndexingAnalysisTest, DynamicSliceOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2)[rt0, rt1, rt2] -> (d0 + rt0, d1 + rt1, d2 + rt2),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 31],
rt0 in [0, 1],
rt1 in [0, 0],
rt2 in [0, 226]
operand id = 1
(d0, d1, d2) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 31]
operand id = 2
(d0, d1, d2) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 31]
operand id = 3
(d0, d1, d2) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 31]
)"));
}
TEST_F(IndexingAnalysisTest, DynamicUpdateSliceOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[20,30] parameter(0)
%upd = s32[5,10] parameter(1)
%of1 = s32[] parameter(2)
%of2 = s32[] parameter(3)
ROOT %dus = s32[20,30] dynamic-update-slice(
s32[20,30] %src, s32[5,10] %upd, s32[] %of1, s32[] %of2)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 19],
d1 in [0, 29]
operand id = 1
(d0, d1)[rt0, rt1] -> (d0 - rt0, d1 - rt1),
domain:
d0 in [0, 19],
d1 in [0, 29],
rt0 in [0, 15],
rt1 in [0, 20]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 19],
d1 in [0, 29]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 19],
d1 in [0, 29]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithSingleBinaryOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT a0 = f32[100] add(p0, p1)
}
ENTRY e {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT fusion = f32[100] fusion(p0, p1), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0),
domain:
d0 in [0, 99]
operand id = 1
(d0) -> (d0),
domain:
d0 in [0, 99]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithDot) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
f {
p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
bitcast1 = s8[3,6,128,12288]{2,1,3,0} bitcast(p0)
copy1 = s8[3,6,128,12288]{3,2,1,0} copy(bitcast1)
bitcast2 = s8[2304,12288]{1,0} bitcast(copy1)
convert1 = bf16[2304,12288]{1,0} convert(bitcast2)
bitcast3 = bf16[2304,16,768]{2,1,0} bitcast(convert1)
p3 = bf16[16,12288]{1,0} parameter(3)
convert2 = f32[16,12288]{1,0} convert(p3)
p4 = bf16[16,12288]{1,0} parameter(4)
convert3 = f32[16,12288]{1,0} convert(p4)
add1 = f32[16,12288]{1,0} add(convert2, convert3)
p2 = bf16[16]{0} parameter(2)
convert15 = f32[16]{0} convert(p2)
rsqrt = f32[16]{0} rsqrt(convert15)
convert4 = bf16[16]{0} convert(rsqrt)
bcast1 = bf16[16,12288]{1,0} broadcast(convert4), dimensions={0}
convert5 = f32[16,12288]{1,0} convert(bcast1)
multiply1 = f32[16,12288]{1,0} multiply(add1, convert5)
p1 = bf16[12288]{0} parameter(1)
convert6 = f32[12288]{0} convert(p1)
c1 = bf16[] constant(1)
bcast2 = bf16[12288]{0} broadcast(c1), dimensions={}
convert7 = f32[12288]{0} convert(bcast2)
add2 = f32[12288]{0} add(convert6, convert7)
convert8 = bf16[12288]{0} convert(add2)
bcast3 = bf16[16,12288]{1,0} broadcast(convert8), dimensions={1}
convert9 = f32[16,12288]{1,0} convert(bcast3)
multiply2 = f32[16,12288]{1,0} multiply(multiply1, convert9)
convert10 = bf16[16,12288]{1,0} convert(multiply2)
bcast4 = bf16[16,16,768]{2,1,0} bitcast(convert10)
dot = bf16[16,2304,16]{2,1,0} dot(bitcast3, bcast4),
lhs_batch_dims={1}, lhs_contracting_dims={2},
rhs_batch_dims={1}, rhs_contracting_dims={2}
bcast5 = bf16[16,3,6,128,16]{4,3,2,1,0} bitcast(dot)
copy2 = bf16[16,3,6,128,16]{3,2,4,1,0} copy(bcast5)
convert13 = f32[16,3,6,128,16]{3,2,4,1,0} convert(copy2)
p5 = bf16[3,6,128]{2,1,0} parameter(5)
bcast6 = bf16[3,6,128,16]{2,1,3,0} broadcast(p5), dimensions={0,1,2}
convert11 = f32[3,6,128,16]{2,1,3,0} convert(bcast6)
bcast7 = f32[16,3,6,128,16]{3,2,4,1,0} broadcast(convert11),
dimensions={1,2,3,4}
multiply3 = f32[16,3,6,128,16]{3,2,4,1,0} multiply(convert13, bcast7)
convert12 = bf16[16,3,6,128,16]{3,2,4,1,0} convert(multiply3)
ROOT bcast8 = bf16[16,16,3,1,6,128]{5,4,1,3,2,0} bitcast(convert12)
}
ENTRY e {
p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
p1 = bf16[12288]{0} parameter(1)
p2 = bf16[16]{0} parameter(2)
p3 = bf16[16,12288]{1,0} parameter(3)
p4 = bf16[16,12288]{1,0} parameter(4)
p5 = bf16[3,6,128]{2,1,0} parameter(5)
ROOT fusion = bf16[16,16,3,1,6,128]{5,4,1,3,2,0}
fusion(p0, p1, p2, p3, p4, p5), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3, d4, d5)[s0] -> (d2, d0 * 768 + s0, d4, d5),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127],
s0 in [0, 767]
operand id = 1
(d0, d1, d2, d3, d4, d5)[s0] -> (d0 * 768 + s0),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127],
s0 in [0, 767]
operand id = 2
(d0, d1, d2, d3, d4, d5) -> (d1),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127]
operand id = 3
(d0, d1, d2, d3, d4, d5)[s0] -> (d1, d0 * 768 + s0),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127],
s0 in [0, 767]
operand id = 4
(d0, d1, d2, d3, d4, d5)[s0] -> (d1, d0 * 768 + s0),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127],
s0 in [0, 767]
operand id = 5
(d0, d1, d2, d3, d4, d5) -> (d2, d4, d5),
domain:
d0 in [0, 15],
d1 in [0, 15],
d2 in [0, 2],
d3 in [0, 0],
d4 in [0, 5],
d5 in [0, 127]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithSoftmax) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
add_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
max_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
softmax {
p0 = f32[2,65,125]{2,1,0} parameter(0)
bitcast0 = f32[65,2,125]{2,1,0} bitcast(p0)
constant_neg_inf_1 = f32[] constant(-inf)
reduce0 = f32[2,65]{1,0} reduce(p0, constant_neg_inf_1),
dimensions={2}, to_apply=max_computation
bitcast1 = f32[130]{0} bitcast(reduce0)
bcast1 = f32[130,125]{1,0} broadcast(bitcast1), dimensions={0}
bitcast2 = f32[65,2,125]{2,1,0} bitcast(bcast1)
subtract0 = f32[65,2,125]{2,1,0} subtract(bitcast0, bitcast2)
exponential0 = f32[65,2,125]{2,1,0} exponential(subtract0)
bitcast3 = f32[65,2,125]{2,1,0} bitcast(p0)
reduce1 = f32[2,65]{1,0} reduce(p0, constant_neg_inf_1),
dimensions={2}, to_apply=max_computation
bitcast4 = f32[130]{0} bitcast(reduce1)
bcast2 = f32[130,125]{1,0} broadcast(bitcast4), dimensions={0}
bitcast5 = f32[65,2,125]{2,1,0} bitcast(bcast2)
subtract1 = f32[65,2,125]{2,1,0} subtract(bitcast3, bitcast5)
exponential1 = f32[65,2,125]{2,1,0} exponential(subtract1)
constant_zero_1 = f32[] constant(0)
reduce2 = f32[65,2]{1,0} reduce(exponential1, constant_zero_1),
dimensions={2}, to_apply=add_computation
bitcast6 = f32[130]{0} bitcast(reduce2)
bcast3 = f32[130,125]{1,0} broadcast(bitcast6), dimensions={0}
bitcast7 = f32[65,2,125]{2,1,0} bitcast(bcast3)
divide = f32[65,2,125]{2,1,0} divide(exponential0, bitcast7)
ROOT bitcast8 = f32[2,65,125]{2,1,0} bitcast(divide)
}
ENTRY e {
p0 = f32[2,65,125]{2,1,0} parameter(0)
ROOT fusion = f32[2,65,125]{2,1,0}
fusion(p0), kind=kLoop, calls=softmax
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1, d2)[s0] -> (d0, d1, s0),
domain:
d0 in [0, 1],
d1 in [0, 64],
d2 in [0, 124],
s0 in [0, 124]
)"),
MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 1],
d1 in [0, 64],
d2 in [0, 124]
)"))));
}
TEST_F(IndexingAnalysisTest, FusionOpTensorPlusTransposedTensor) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
ENTRY e {
p0 = f32[1000,1000] parameter(0)
ROOT fusion = f32[1000,1000] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"),
MatchIndexingMap(R"(
(d0, d1) -> (d1, d0),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"))));
}
TEST_F(IndexingAnalysisTest, FusionExponentialDuplication) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule test_module
fused_computation {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
add0 = f32[4] add(p0, p1)
slice1.0 = f32[3] slice(add0), slice={[0:3]}
slice1.1 = f32[3] slice(add0), slice={[1:4]}
add1 = f32[3]{0} add(slice1.0, slice1.1)
slice2.0 = f32[2] slice(add1), slice={[0:2]}
slice2.1 = f32[2] slice(add1), slice={[1:3]}
ROOT add2 = f32[2] add(slice2.0, slice2.1)
}
ENTRY entry_computation {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
ROOT fusion = f32[2] fusion(p0, p1), kind=kLoop,
calls=fused_computation
})"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0) -> (d0 + 1),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0 + 2),
domain:
d0 in [0, 1]
)")),
UnorderedElementsAre(MatchIndexingMap(R"(
(d0) -> (d0 + 2),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0 + 1),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 1]
)"))));
}
TEST_F(IndexingAnalysisTest, GatherOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY main {
operand = f32[33,76,70] parameter(0)
indices = s32[1806,2] parameter(1)
ROOT r = f32[1806,7,8,4] gather(operand, indices), offset_dims={1,2,3},
collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[rt0, rt1] -> (d1 + rt0, d2 + rt1, d3),
domain:
d0 in [0, 1805],
d1 in [0, 6],
d2 in [0, 7],
d3 in [0, 3],
rt0 in [0, 26],
rt1 in [0, 68]
operand id = 1
(d0, d1, d2, d3)[s0] -> (d0, s0),
domain:
d0 in [0, 1805],
d1 in [0, 6],
d2 in [0, 7],
d3 in [0, 3],
s0 in [0, 1]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReduceOfReduce) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] parameter(1)
reduce_1 = f32[20, 10] reduce(p0, p0_init),
dimensions={0, 3}, to_apply=max
ROOT reduce_2 = f32[10] reduce(reduce_1, p0_init),
dimensions={0}, to_apply=max
}
ENTRY e {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[10] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0, s1, s2] -> (s0, s2, d0, s1),
domain:
d0 in [0, 9],
s0 in [0, 149],
s1 in [0, 49],
s2 in [0, 19]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReduceOfBroadcast) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] parameter(1)
p0_bcast = f32[15, 32, 20, 64] broadcast(p0), dimensions={0, 2}
ROOT reduce_2 = f32[15, 64] reduce(p0_bcast, p0_init),
dimensions={1, 2}, to_apply=max
}
ENTRY e {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[15, 64] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0, s0),
domain:
d0 in [0, 14],
d1 in [0, 63],
s0 in [0, 19]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 14],
d1 in [0, 63]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithTransposeOfTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[20, 10, 50] parameter(0)
lhs_transpose_1 = f32[10, 20, 50]
transpose(p0), dimensions={1, 0, 2}
lhs_e = f32[10, 20, 50] exponential(lhs_transpose_1)
lhs_transpose_2 = f32[10, 50, 20]
transpose(lhs_e), dimensions={0, 2, 1}
rhs_transpose_1 = f32[50, 10, 20]
transpose(p0), dimensions={2, 1, 0}
rhs_log = f32[50, 10, 20] exponential(rhs_transpose_1)
rhs_transpose_2 = f32[10, 50, 20]
transpose(rhs_log), dimensions={1, 0, 2}
ROOT add = f32[10, 50, 20] add(lhs_transpose_2, rhs_transpose_2)
}
ENTRY e {
p0 = f32[20, 10, 50] parameter(0)
ROOT fusion = f32[10, 50, 20] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d2, d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 49],
d2 in [0, 19]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReducedSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[150, 64, 1024] parameter(0)
p0_init = f32[] parameter(1)
p0_slice = f32[16, 32, 128] slice(f32[150, 64, 1024] p0),
slice={[5:21:1], [0:64:2], [50:434:3]}
ROOT reduce = f32[32] reduce(p0_slice, p0_init),
dimensions={0, 2}, to_apply=max
}
ENTRY e {
p0 = f32[150, 64, 1024] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[32] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0, s1] -> (s0 + 5, d0 * 2, s1 * 3 + 50),
domain:
d0 in [0, 31],
s0 in [0, 15],
s1 in [0, 127]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 31]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_CollapseOfExpand) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[128] parameter(0)
expand = f32[8, 16] reshape(p0)
ROOT collapse = f32[128] reshape(expand)
}
ENTRY e {
p0 = f32[128] parameter(0)
ROOT fusion = f32[128] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0),
domain:
d0 in [0, 127]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_ExpandOfCollapse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[8, 16] parameter(0)
collapse = f32[128] reshape(p0)
ROOT expand = f32[8, 16] reshape(collapse)
}
ENTRY e {
p0 = f32[8, 16] parameter(0)
ROOT fusion = f32[8, 16] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 15]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_ChainedGenericReshapes) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[10, 10, 10] parameter(0)
reshape1 = f32[50, 20] reshape(p0)
ROOT reshape2 = f32[10, 10, 10] reshape(reshape1)
}
ENTRY e {
p0 = f32[10, 10, 10] parameter(0)
ROOT fusion = f32[10, 10, 10] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithSliceOfSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[150, 64, 1024] parameter(0)
p0_slice_1 = f32[16, 32, 128] slice(f32[150, 64, 1024] p0),
slice={[5:21:1], [0:64:2], [50:434:3]}
ROOT p0_slice_2 = f32[7, 9, 24] slice(f32[16, 32, 128] p0_slice_1),
slice={[3:16:2], [4:30:3], [5:100:4]}
}
ENTRY e {
p0 = f32[150, 64, 1024] parameter(0)
ROOT fusion = f32[7, 9, 24] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 2 + 8, d1 * 6 + 8, d2 * 12 + 65),
domain:
d0 in [0, 6],
d1 in [0, 8],
d2 in [0, 23]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithDynSliceOfDynSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
%src = s32[150, 64] parameter(0)
%of11 = s32[] parameter(1)
%of12 = s32[] parameter(2)
%of21 = s32[] parameter(3)
%of22 = s32[] parameter(4)
%ds1 = s32[50, 32] dynamic-slice(s32[150, 64] %src,
s32[] %of11, s32[] %of12), dynamic_slice_sizes={50, 32}
ROOT %ds2 = s32[25, 16] dynamic-slice(s32[50, 32] %ds1,
s32[] %of21, s32[] %of22), dynamic_slice_sizes={25, 16}
}
ENTRY e {
%p0 = s32[150, 64] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[] parameter(2)
%p3 = s32[] parameter(3)
%p4 = s32[] parameter(4)
ROOT fusion = s32[25, 16] fusion(p0, p1, p2, p3, p4),
kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[rt0, rt1, rt2, rt3] -> (d0 + rt0 + rt2, d1 + rt1 + rt3),
domain:
d0 in [0, 24],
d1 in [0, 15],
rt0 in [0, 100],
rt1 in [0, 32],
rt2 in [0, 25],
rt3 in [0, 16]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 4
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpSliceOfAllConcatenateOpInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
ROOT slice = f32[2, 11, 7] slice(f32[2, 33, 7] concat),
slice={[0:2:1], [0:33:3], [0:7:1]}
}
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT fusion = f32[2, 11, 7] fusion(p0, p1, p2), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 * 3, d2),
domain:
d0 in [0, 1],
d1 in [0, 1],
d2 in [0, 6]
operand id = 1
(d0, d1, d2) -> (d0, d1 * 3 - 5, d2),
domain:
d0 in [0, 1],
d1 in [2, 5],
d2 in [0, 6]
operand id = 2
(d0, d1, d2) -> (d0, d1 * 3 - 16, d2),
domain:
d0 in [0, 1],
d1 in [6, 10],
d2 in [0, 6]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpSliceOfOneOfConcatenateOpInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
ROOT slice = f32[2, 3, 7] slice(f32[2, 33, 7] concat),
slice={[0:2:1], [0:5:2], [0:7:1]}
}
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT fusion = f32[2, 3, 7] fusion(p0, p1, p2), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 * 2, d2),
domain:
d0 in [0, 1],
d1 in [0, 2],
d2 in [0, 6]
operand id = 1
KNOWN EMPTY
operand id = 2
KNOWN EMPTY
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpReshapeOfConcat) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2] parameter(0)
p1 = f32[30] parameter(1)
concat = f32[32] concatenate(f32[2] p0, f32[30] p1), dimensions={0}
ROOT reshape = f32[4, 8] reshape(concat)
}
ENTRY e {
p0 = f32[2] parameter(0)
p1 = f32[30] parameter(1)
ROOT fusion = f32[4, 8] fusion(p0, p1), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 8 + d1),
domain:
d0 in [0, 3],
d1 in [0, 7],
d0 * 8 + d1 in [0, 1]
operand id = 1
(d0, d1) -> (d0 * 8 + d1 - 2),
domain:
d0 in [0, 3],
d1 in [0, 7],
d0 * 8 + d1 in [2, 31]
)"));
}
TEST_F(IndexingAnalysisTest, IotaOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
ROOT iota = s32[5,5,111,42] iota(), iota_dimension=0
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps, IsEmpty());
}
TEST_F(IndexingAnalysisTest, ReshapeOpCollapseShape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4,8] parameter(0)
ROOT reshape = f32[32] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0 floordiv 8, d0 mod 8),
domain:
d0 in [0, 31]
)"));
}
TEST_F(IndexingAnalysisTest, ReshapeOpExpandShape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[32] parameter(0)
ROOT reshape = f32[4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 8 + d1),
domain:
d0 in [0, 3],
d1 in [0, 7]
)"));
}
TEST_F(IndexingAnalysisTest, ReshapeOpExpandAndCollapseShape) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 8, 12] parameter(0)
ROOT reshape = f32[32, 3, 4] reshape(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 floordiv 8, d0 mod 8, d1 * 4 + d2),
domain:
d0 in [0, 31],
d1 in [0, 2],
d2 in [0, 3]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 8 + d1, d2 floordiv 4, d2 mod 4),
domain:
d0 in [0, 3],
d1 in [0, 7],
d2 in [0, 11]
)"));
}
TEST_F(IndexingAnalysisTest, ReshapeOpExpandSubshapeOnly) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[16, 8] parameter(0)
ROOT reshape = f32[4, 4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 4 + d1, d2),
domain:
d0 in [0, 3],
d1 in [0, 3],
d2 in [0, 7]
)"));
}
TEST_F(IndexingAnalysisTest, ReshapeOpGenericReshape2DTo3D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4,8] parameter(0)
ROOT reshape = f32[2, 4, 4] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 2 + d1 floordiv 2, (d1 mod 2) * 4 + d2),
domain:
d0 in [0, 1],
d1 in [0, 3],
d2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ReshapeOpGenericReshape3DTo2D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2, 4, 4] parameter(0)
ROOT reshape = f32[4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 floordiv 2,
(d0 mod 2) * 2 + d1 floordiv 4,
d1 mod 4),
domain:
d0 in [0, 3],
d1 in [0, 7]
)"));
}
TEST_F(IndexingAnalysisTest, PadOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 4] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> ((d0 - 1) floordiv 2, d1 - 4),
domain:
d0 in [1, 7],
d1 in [4, 7],
(d0 - 1) mod 2 in [0, 0]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 11],
d1 in [0, 15]
)"));
}
TEST_F(IndexingAnalysisTest, PadOpNoInterior) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2,8] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[10,8] pad(p0, p1), padding=1_7x0_0
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 - 1, d1),
domain:
d0 in [1, 2],
d1 in [0, 7]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 9],
d1 in [0, 7]
)"));
}
TEST_F(IndexingAnalysisTest, PadOpNegativePadding) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[5] pad(p0, p1), padding=-3_-5_1
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> ((d0 + 3) floordiv 2),
domain:
d0 in [0, 4],
(d0 + 3) mod 2 in [0, 0]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 4]
)"));
}
TEST_F(IndexingAnalysisTest, ReduceOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] constant(-inf)
ROOT reduce = f32[150, 10] reduce(p0, p0_init),
dimensions={3, 1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (d0, s0, d1, s1),
domain:
d0 in [0, 149],
d1 in [0, 9],
s0 in [0, 19],
s1 in [0, 49]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 149],
d1 in [0, 9]
)"));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d2),
domain:
d0 in [0, 149],
d1 in [0, 19],
d2 in [0, 9],
d3 in [0, 49]
)"));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
()[s0, s1] -> (s0, s1),
domain:
s0 in [0, 149],
s1 in [0, 9]
)"));
}
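// Variadic reduce: both tuple outputs share the same indexing into the two
// inputs and the two init values.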
TEST_F(IndexingAnalysisTest, VariadicReduceOp) {
HloInstruction* root = ParseAndGetRoot(R"(
HloModule m
min {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
cmp = pred[] compare(tmp_0, tmp_1), direction=GE
select1 = f32[] select(cmp, tmp_0, tmp_1)
select2 = s32[] select(cmp, tmp_2, tmp_3)
ROOT tmp_4 = (f32[], s32[]) tuple(select1, select2)
}
ENTRY e {
p0 = f32[256,10] parameter(0)
p0_init = f32[] constant(-inf)
p1 = s32[256,10] parameter(1)
p1_init = s32[] constant(0)
ROOT reduce = (f32[10], s32[10]) reduce(p0, p1, p0_init, p1_init),
dimensions={0}, to_apply=min
}
)");
auto output_indexing_0 = GetOutputToInputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 1
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 2
(d0) -> (),
domain:
d0 in [0, 9]
operand id = 3
(d0) -> (),
domain:
d0 in [0, 9]
)"));
auto output_indexing_1 = GetOutputToInputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 1
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 2
(d0) -> (),
domain:
d0 in [0, 9]
operand id = 3
(d0) -> (),
domain:
d0 in [0, 9]
)"));
constexpr std::string_view kInputToOutputIndexing = R"(
(d0, d1) -> (d1),
domain:
d0 in [0, 255],
d1 in [0, 9]
)";
auto input_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(
input_indexing_0.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInputToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInputToOutputIndexing))));
auto input_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(
input_indexing_1.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInputToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInputToOutputIndexing))));
constexpr std::string_view kInitToOutputIndexing = R"(
()[s0] -> (s0),
domain:
s0 in [0, 9]
)";
auto input_indexing_2 = GetInputToOutputIndexing(root, 2);
EXPECT_THAT(
input_indexing_2.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInitToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInitToOutputIndexing))));
auto input_indexing_3 = GetInputToOutputIndexing(root, 3);
EXPECT_THAT(
input_indexing_3.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInitToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInitToOutputIndexing))));
}
TEST_F(IndexingAnalysisTest, ReduceWindowOp_NoPadding) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[1024, 514]parameter(0)
ROOT reduce-window = f32[1024, 3] reduce-window(p0, c_inf),
window={size=1x512 pad=0_0x0_0}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0, d1 + s0),
domain:
d0 in [0, 1023],
d1 in [0, 2],
s0 in [0, 511]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 1023],
d1 in [0, 2]
)"));
}
TEST_F(IndexingAnalysisTest, ReduceWindowOp_PaddingAndWindowStride) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[13, 17] parameter(0)
ROOT reduce-window = f32[7, 17] reduce-window(p0, c_inf),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (d0 * 2 + s0 - 1, d1 + s1),
domain:
d0 in [0, 6],
d1 in [0, 16],
s0 in [0, 2],
s1 in [0, 1],
d0 * 2 + s0 in [1, 13],
d1 + s1 in [0, 16]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 6],
d1 in [0, 16]
)"));
}
TEST_F(IndexingAnalysisTest, ReduceWindowOp_BaseDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[2, 3] parameter(0)
ROOT reduce-window = f32[3, 5] reduce-window(p0, c_inf),
window={size=1x1 pad=0_0x0_0 lhs_dilate=2x2}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 floordiv 2, d1 floordiv 2),
domain:
d0 in [0, 2],
d1 in [0, 4],
d0 mod 2 in [0, 0],
d1 mod 2 in [0, 0]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 2],
d1 in [0, 4]
)"));
}
TEST_F(IndexingAnalysisTest, ReduceWindowOp_WindowDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[7, 3] parameter(0)
ROOT reduce-window = f32[4, 3] reduce-window(p0, c_inf),
window={size=2x1 pad=0_0x0_0 rhs_dilate=3x1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0 + s0 * 3, d1),
domain:
d0 in [0, 3],
d1 in [0, 2],
s0 in [0, 1]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 3],
d1 in [0, 2]
)"));
}
TEST_F(IndexingAnalysisTest, ReduceWindowOp_Variadic) {
auto root = ParseAndGetRoot(R"(
HloModule m
combiner {
a0 = f32[] parameter(0)
a1 = s32[] parameter(1)
b0 = f32[] parameter(2)
b1 = s32[] parameter(3)
add0 = f32[] add(a0, b0)
add1 = s32[] add(a1, b1)
ROOT sum2 = (f32[], s32[]) tuple(add0, add1)
}
ENTRY e {
c_f32 = f32[] constant(-inf)
c_s32 = s32[] constant(10)
p0 = f32[2, 3] parameter(0)
p1 = s32[2, 3] parameter(1)
ROOT reduce-window = (f32[1, 2], s32[1, 2])
reduce-window(p0, p1, c_f32, c_s32),
window={size=2x2 pad=0_0x0_0}, to_apply=combiner
}
)");
auto input_indexing_0 = GetOutputToInputIndexing(root, 0);
EXPECT_THAT(input_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 1
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
)"));
auto input_indexing_1 = GetOutputToInputIndexing(root, 1);
EXPECT_THAT(input_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 1
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_NoPadding) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,10,6,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0, d2 + s1, s2),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_PaddingAndWindowStride) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,6,5,8] convolution(p0, p1),
window={size=3x5 stride=2x2 pad=1_1x2_2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 * 2 + s0 - 1, d2 * 2 + s1 - 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 5],
d2 in [0, 4],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
d1 * 2 + s0 in [1, 12],
d2 * 2 + s1 in [2, 11]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 5],
d2 in [0, 4],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_LhsDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,21,15,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0 lhs_dilate=2x2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, (d1 + s0) floordiv 2, (d2 + s1) floordiv 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 20],
d2 in [0, 14],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
(d1 + s0) mod 2 in [0, 0],
(d2 + s1) mod 2 in [0, 0]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 20],
d2 in [0, 14],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_RhsDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,8,2,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0 rhs_dilate=2x2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0 * 2, d2 + s1 * 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 7],
d2 in [0, 1],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 7],
d2 in [0, 1],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_FeatureGroups) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,24] parameter(0)
p1 = f32[4,3,5,48] parameter(1)
ROOT conv = f32[1,10,6,48] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, feature_group_count=6
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0, d2 + s1, (d3 floordiv 8) * 4 + s2),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 47],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 47],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ConvolutionOp_BatchGroups) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[14,12,10,4] parameter(0)
p1 = f32[4,3,5,21] parameter(1)
ROOT conv = f32[2,10,6,21] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, batch_group_count=7
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 + s3 * 2, d1 + s0, d2 + s1, s2),
domain:
d0 in [0, 1],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 20],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
s3 in [0, 6]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 1],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 20],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
TEST_F(IndexingAnalysisTest, ReverseOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1, 17, 9, 9] parameter(0)
ROOT reverse = f32[1, 17, 9, 9] reverse(p0), dimensions={1, 2}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, -d1 + 16, -d2 + 8, d3),
domain:
d0 in [0, 0],
d1 in [0, 16],
d2 in [0, 8],
d3 in [0, 8]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, -d1 + 16, -d2 + 8, d3),
domain:
d0 in [0, 0],
d1 in [0, 16],
d2 in [0, 8],
d3 in [0, 8]
)"));
}
TEST_F(IndexingAnalysisTest, ReverseReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
fused_computation {
p0 = f32[10, 11] parameter(0)
reverse.0 = f32[10, 11] reverse(p0), dimensions={0, 1}
reshape.0 = f32[110] reshape(reverse.0)
reverse.1 = f32[110] reverse(reshape.0), dimensions={0}
ROOT reshape.1 = f32[10, 11] reshape(reverse.1)
}
ENTRY e {
p0 = f32[10, 11] parameter(0)
ROOT fusion = f32[10, 11] fusion(p0), kind=kLoop,
calls=fused_computation
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 10]
)"));
}
TEST_F(IndexingAnalysisTest, SliceOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 50] parameter(0)
ROOT slice = f32[5, 3, 25] slice(f32[10, 20, 50] p0),
slice={[5:10:1], [3:20:7], [0:50:2]}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 + 5, d1 * 7 + 3, d2 * 2),
domain:
d0 in [0, 4],
d1 in [0, 2],
d2 in [0, 24]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (
d0 - 5,
(d1 - 3) floordiv 7,
d2 floordiv 2
),
domain:
d0 in [5, 9],
d1 in [3, 17],
d2 in [0, 48],
(d1 - 3) mod 7 in [0, 0],
d2 mod 2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, TransposeOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT transpose = f32[3, 6, 128, 12288]
transpose(p0), dimensions={0, 2, 3, 1}
}
)");
EXPECT_THAT(GetOutputToInputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d3, d1, d2),
domain:
d0 in [0, 2],
d1 in [0, 5],
d2 in [0, 127],
d3 in [0, 12287]
)"));
EXPECT_THAT(GetInputToOutputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d2, d3, d1),
domain:
d0 in [0, 2],
d1 in [0, 12287],
d2 in [0, 5],
d3 in [0, 127]
)"));
}
TEST_F(IndexingAnalysisTest, TransposeOp4D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT bitcast = f32[3, 6, 128, 12288] {2, 1, 3, 0} bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d3, d1, d2),
domain:
d0 in [0, 2],
d1 in [0, 5],
d2 in [0, 127],
d3 in [0, 12287]
)"));
}
TEST_F(IndexingAnalysisTest, DotOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 38, 17, 11, 18, 10] parameter(0)
p1 = f32[17, 10, 16, 18, 22, 38] parameter(1)
ROOT dot = f32[10, 38, 4, 11, 16, 22] dot(p0, p1),
lhs_batch_dims={5,1}, rhs_batch_dims={1,5},
lhs_contracting_dims={4,2}, rhs_contracting_dims={3,0}
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (d2, d1, s1, d3, s0, d0),
domain:
d0 in [0, 9],
d1 in [0, 37],
d2 in [0, 3],
d3 in [0, 10],
d4 in [0, 15],
d5 in [0, 21],
s0 in [0, 17],
s1 in [0, 16]
operand id = 1
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (s1, d0, d4, s0, d5, d1),
domain:
d0 in [0, 9],
d1 in [0, 37],
d2 in [0, 3],
d3 in [0, 10],
d4 in [0, 15],
d5 in [0, 21],
s0 in [0, 17],
s1 in [0, 16]
)"));
}
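// Ops without indexing support, such as triangular-solve, fall back to
// "unknown indexing" in both directions.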
TEST_F(IndexingAnalysisTest, UnsupportedOps) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[20, 20] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[4,3] parameter(2)
ROOT out = f32[4,3] triangular-solve(f32[4,4] p1, f32[4,3] p2),
left_side=true,
lower=true,
transpose_a=NO_TRANSPOSE,
unit_diagonal=true
}
)");
EXPECT_THAT(GetOutputToInputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0 unknown indexing
operand id = 1 unknown indexing
)"));
EXPECT_THAT(GetInputToOutputIndexing(root, 0).ToString(),
MatchIndexingString(R"(
operand id = 0 unknown indexing
)"));
EXPECT_THAT(GetInputToOutputIndexing(root, 1).ToString(),
MatchIndexingString(R"(
operand id = 0 unknown indexing
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithUnsupportedOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
fused_computation {
p0 = f32[20, 20] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[4,3] parameter(2)
lhs = f32[4,3] triangular-solve(f32[4,4] p1, f32[4,3] p2),
left_side=true,
lower=true,
transpose_a=NO_TRANSPOSE,
unit_diagonal=true
rhs = f32[4, 3] slice(f32[20, 20] p0),
slice={[0:20:6], [0:5:2]}
ROOT add = f32[4, 3] add(lhs, rhs)
}
ENTRY e {
p0 = f32[20, 20] parameter(0)
p1 = f32[4, 4] parameter(1)
p2 = f32[4, 3] parameter(2)
ROOT fusion = f32[4, 3] fusion(p0, p1, p2), kind=kLoop,
calls=fused_computation
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 6, d1 * 2),
domain:
d0 in [0, 3],
d1 in [0, 2]
operand id = 1
unknown indexing
operand id = 2
unknown indexing
)"));
}
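// Checks that the epilogue indexing composed from the transpose through the
// bitcast and log linearizes the 2D index into the 1D output.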
TEST_F(IndexingAnalysisTest, EpilogueIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
p0 = f32[1000, 1000] parameter(0)
t = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
a0 = f32[1000000] bitcast(t)
ROOT log = f32[1000000] log(a0)
}
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
ROOT fusion = f32[1000000] fusion(p0), kind=kLoop,
calls=fused_computation
}
)");
ASSERT_TRUE(module.ok());
auto* computation = (*module)->GetComputationWithName("fused_computation");
auto fusion = HloFusionAdaptor::ForComputation(computation);
HloInstructionAdaptor transpose(*computation->GetInstructionWithName("t"),
fusion.get());
HloInstructionAdaptor log(*computation->GetInstructionWithName("log"),
fusion.get());
EXPECT_THAT(ToString(ComputeEpilogueInputToOutputIndexing(transpose, log,
&mlir_context_)),
MatchIndexingString(R"(
(d0, d1) -> (d1 * 1000 + d0),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, EpilogueIndexing_NoEpilogue) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
p0 = f32[1000, 1000] parameter(0)
ROOT t = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
}
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
ROOT fusion = f32[1000, 1000] fusion(p0), kind=kLoop,
calls=fused_computation
}
)");
ASSERT_TRUE(module.ok());
auto* computation = (*module)->GetComputationWithName("fused_computation");
auto fusion = HloFusionAdaptor::ForComputation(computation);
HloInstructionAdaptor transpose(*computation->GetInstructionWithName("t"),
fusion.get());
EXPECT_THAT(ToString(ComputeEpilogueInputToOutputIndexing(
transpose, transpose, &mlir_context_)),
MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, BroadcastingElementwise) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = pred[] parameter(0)
p1 = f32[1000, 1000] parameter(1)
p2 = f32[1000, 1000] parameter(2)
ROOT select = f32[1000, 1000] select(p0, p1, p2)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0, d1) -> (),
domain:
d0 in [0, 999],
d1 in [0, 999]
operand id = 1 (d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
operand id = 2 (d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_ScalarConstant) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
offset = s64[] constant(42)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, offset), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
ROOT fusion = s32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0 + 42),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Iota) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=0
ROOT gather = f32[42,1,1] gather(p0, iota),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_IotaAsConstant) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=1
ROOT gather = f32[42,1,1] gather(p0, iota),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (0, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Broadcast) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
c42 = s64[] constant(42)
bcast = s64[42, 1] broadcast(s64[] c42), dimensions={}
ROOT gather = f32[42,1,1] gather(p0, bcast),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (42, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Reverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=0
reverse = s64[42,1] reverse(iota), dimensions={0}
ROOT gather = f32[42,1,1] gather(p0, reverse),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (-d0 + 41, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Add) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
add = s64[] add(c42, p1)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, add), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0)[rt0] -> (d0 + rt0 + 42),
domain:
d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Multiply) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
add = s64[] multiply(c42, p1)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, add), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0)[rt0] -> (d0 + rt0 * 42),
domain:
d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_ChainedOps) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
c2 = s64[] constant(2)
add = s64[] add(c42, p1)
multiply = s64[] multiply(c2, add)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, multiply), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[rt0] -> (d0 + rt0 * 2 + 84),
domain: d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithDUS) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
bitcast = s32[1,4096]{1,0} parameter(0)
constant = s32[] constant(0)
pad = s32[1,8192]{1,0} pad(bitcast, constant), padding=0_0x4096_0
slice = s32[1]{0} parameter(1)
bitcast.4 = s32[] bitcast(slice)
ROOT dynamic-slice = s32[1,4096]{1,0}
dynamic-slice(pad, constant, bitcast.4), dynamic_slice_sizes={1,4096}
}
ENTRY main {
param_0 = s32[1,4096]{1,0} parameter(0)
param_1 = s32[1]{0} parameter(1)
ROOT fusion = s32[1,4096]{1,0} fusion(param_0, param_1), kind=kInput,
calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[rt0] -> (0, d1 + rt0 - 4096),
domain:
d0 in [0, 0],
d1 in [0, 4095],
rt0 in [0, 4096],
d1 + rt0 in [4096, 8191]
operand id = 1
(d0, d1) -> (0),
domain:
d0 in [0, 0],
d1 in [0, 4095]
)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d84fa11-8082-48bf-843b-b2081373cfdd | cpp | tensorflow/tensorflow | coalescing_analysis | third_party/xla/xla/service/gpu/model/coalescing_analysis.cc | third_party/xla/xla/service/gpu/model/coalescing_analysis_test.cc | #include "xla/service/gpu/model/coalescing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <stack>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
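// Cheap heuristic used when exact thread-to-memory indexing is unavailable:
// reads are treated as uncoalesced if the producer or consumer transposes a
// minor dimension whose input does not come from a broadcast/iota chain, or
// if two input-fusible reductions are fused together. The transpose check is
// skipped when the fusion uses the transpose emitter.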
bool IsReadCoalescedHeuristic(HloFusionAnalysis::EmitterFusionKind fusion_kind,
const HloInstruction* producer,
const HloInstruction* consumer) {
if (fusion_kind != HloFusionAnalysis::EmitterFusionKind::kTranspose) {
auto is_broadcast = [&](const HloInstruction* instr) {
while (true) {
if (instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kIota) {
return true;
}
if (instr->operand_count() != 1) return false;
if (instr->opcode() != HloOpcode::kBitcast && !instr->IsElementwise()) {
return false;
}
instr = instr->operand(0);
}
};
auto is_bad_transpose = [&](const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kFusion) {
for (auto* instr : instr->fused_instructions()) {
if (TransposesMinorDimension(instr) &&
!is_broadcast(instr->operand(0))) {
return true;
}
}
return false;
}
return TransposesMinorDimension(instr) &&
!is_broadcast(instr->operand(0));
};
if (is_bad_transpose(producer)) return false;
if (consumer && is_bad_transpose(consumer)) return false;
}
if (fusion_kind == HloFusionAnalysis::EmitterFusionKind::kReduction &&
IsInputFusibleReduction(*producer) && consumer &&
IsInputFusibleReduction(*consumer)) {
return false;
}
return true;
}
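// Tiled-HLO variant of the heuristic: walks the operand's physical
// minor-to-major dimensions, accumulating contiguous elements while the tile
// stride is 1, and stops as soon as a tile does not span the full dimension.
// The read is considered coalesced if the contiguous span covers at least one
// DRAM-to-L2 transaction.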
bool IsTiledReadCoalescedHeuristic(const TiledHloInstruction& operand,
const se::DeviceDescription& device_info) {
const Shape& shape = operand.hlo()->shape();
int64_t contiguous_read_elements = 1;
for (const auto dim_idx : shape.layout().minor_to_major()) {
if (operand.tile_stride(dim_idx) != 1) {
break;
}
int64_t tile_size = operand.tile_size(dim_idx);
int64_t dim_size = shape.dimensions(dim_idx);
contiguous_read_elements *= std::min(tile_size, dim_size);
if (tile_size < dim_size) {
break;
}
}
int64_t contiguous_bytes_accessed =
contiguous_read_elements *
ShapeUtil::ByteSizeOfPrimitiveType(operand.hlo()->shape().element_type());
return contiguous_bytes_accessed >=
device_info.dram_to_l2_transaction_size_bytes();
}
namespace {
using ::mlir::AffineBinaryOpExpr;
using ::mlir::AffineConstantExpr;
using ::mlir::AffineExpr;
using ::mlir::AffineExprKind;
using ::mlir::AffineMap;
using ::mlir::getAffineConstantExpr;
using ::mlir::MLIRContext;
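// Estimates coalescing by counting the 128-byte memory transactions needed to
// cover the given element intervals and comparing the count against the lower
// bound for the same number of elements read contiguously.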
bool EstimateCoalescingViaMemoryTransactionsCount(
absl::Span<const Interval> intervals, PrimitiveType element_type) {
constexpr int64_t kBytesPerMemoryTransaction = 128;
int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type);
int memory_transactions = 0;
int total_num_elements = 0;
for (const auto& range : intervals) {
int64_t num_elements = range.upper - range.lower + 1;
memory_transactions += llvm::divideCeilSigned(num_elements * type_size,
kBytesPerMemoryTransaction);
total_num_elements += num_elements;
}
if (memory_transactions == 0) {
return true;
}
int memory_transactions_lower_bound = llvm::divideCeilSigned(
total_num_elements * type_size, kBytesPerMemoryTransaction);
constexpr float kIsCoalescedThreshold = 0.9;
return memory_transactions_lower_bound >
memory_transactions * kIsCoalescedThreshold;
}
Shape GetLinearizedShape(const Shape& shape) {
if (shape.rank() == 0) {
return shape;
}
std::vector<int64_t> dims{ShapeUtil::ElementsIn(shape)};
auto result = Shape(shape.element_type(), dims,
absl::InlinedVector<bool, 4>(dims.size(), false), {});
*result.mutable_layout() = xla::Layout({0});
return result;
}
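// For every fusion operand, composes the thread ID -> hero operand indexing
// with the operand's output-to-input indexing and with the logical ->
// physical -> linearized layout maps, producing thread ID -> linearized
// physical element maps. Returns nullopt if the emitter cannot provide thread
// ID indexing for some hero operand.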
std::optional<GroupedByOpIndexingMap> GetThreadIdToInputMemoryLayoutsMaps(
const HloFusionAdaptor& fusion_adaptor,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
GroupedByOpIndexingMap result;
for (const auto& [root_index, hero] :
llvm::enumerate(fusion_analysis.fusion_heroes())) {
for (const auto& [hero_operand_index, hero_operand] :
llvm::enumerate(hero.GetOperands())) {
if (hero_operand.shape().rank() == 0) {
continue;
}
std::optional<IndexingMap> thread_id_to_hero_operand_map =
fusion_interface->ComputeThreadIdToInputIndexing(
root_index, hero_operand_index, mlir_context);
if (!thread_id_to_hero_operand_map.has_value()) {
return std::nullopt;
}
GroupedByOpIndexingMap instr_indexing_keyed_by_operands =
ComputeGroupedOutputToInputIndexing(fusion_adaptor, hero_operand,
mlir_context);
for (const HloInstruction* operand : operands) {
auto operand_indexing_maps_it =
instr_indexing_keyed_by_operands.find(operand);
if (operand_indexing_maps_it ==
instr_indexing_keyed_by_operands.end()) {
continue;
}
const Shape& operand_shape = operand->shape();
IndexingMap operand_logical_to_physical_map =
GetIndexingMapFromLogicalToPhysicalLayout(operand_shape,
mlir_context);
IndexingMap operand_physical_to_linearized_shape = GetBitcastMap(
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
operand_shape),
GetLinearizedShape(operand_shape), mlir_context);
IndexingMap operand_logical_to_linearized_physical_shape =
operand_logical_to_physical_map *
operand_physical_to_linearized_shape;
operand_logical_to_linearized_physical_shape.Simplify();
for (const IndexingMap& operand_indexing_map :
operand_indexing_maps_it->second) {
if (operand_indexing_map.IsUndefined()) {
result[operand] = {operand_indexing_map};
break;
}
IndexingMap logical_output_to_linearized_physical_input_map =
operand_indexing_map *
operand_logical_to_linearized_physical_shape;
IndexingMap thread_id_to_linearized_physical_input_map =
*thread_id_to_hero_operand_map *
logical_output_to_linearized_physical_input_map;
thread_id_to_linearized_physical_input_map.Simplify();
result[operand].insert(thread_id_to_linearized_physical_input_map);
}
}
}
}
return result;
}
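// Replaces every runtime-value (RT) symbol with the midpoint of its feasible
// range, leaving a map that depends only on dimensions and loop symbols.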
void AssignValuesToRTVars(IndexingMap* indexing_map) {
if (indexing_map->GetRTVarsCount() == 0) {
return;
}
MLIRContext* mlir_context = indexing_map->GetMLIRContext();
llvm::SmallVector<AffineExpr, 2> symbol_replacements;
for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount();
++symbol_id) {
symbol_replacements.push_back(
mlir::getAffineSymbolExpr(symbol_id, mlir_context));
}
for (const IndexingMap::Variable& rt_var : indexing_map->GetRTVars()) {
symbol_replacements.push_back(getAffineConstantExpr(
(rt_var.bounds.lower + rt_var.bounds.upper) / 2, mlir_context));
}
AffineMap thread_x_to_input_no_dim_symbols =
indexing_map->GetAffineMap().replaceDimsAndSymbols(
{}, symbol_replacements, indexing_map->GetDimVarsCount(),
indexing_map->GetRangeVarsCount());
*indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
indexing_map->GetDimVars(),
indexing_map->GetRangeVars(),
{}};
indexing_map->Simplify();
indexing_map->RemoveUnusedSymbols();
}
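// Fixes every loop induction variable except the innermost one to its lower
// bound, keeping a single range symbol in the map.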
void AssignValuesToOuterLoopIVs(IndexingMap* indexing_map) {
if (indexing_map->GetRangeVarsCount() <= 1) {
return;
}
MLIRContext* mlir_context = indexing_map->GetMLIRContext();
llvm::SmallVector<AffineExpr, 2> symbol_replacements;
for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount() - 1;
++symbol_id) {
symbol_replacements.push_back(getAffineConstantExpr(
indexing_map->GetRangeVar(symbol_id).bounds.lower, mlir_context));
}
symbol_replacements.push_back(mlir::getAffineSymbolExpr(0, mlir_context));
AffineMap thread_x_to_input_no_dim_symbols =
indexing_map->GetAffineMap().replaceDimsAndSymbols(
{}, symbol_replacements, indexing_map->GetDimVarsCount(), 1);
*indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
indexing_map->GetDimVars(),
{indexing_map->GetRangeVars().back()},
{}};
indexing_map->Simplify();
indexing_map->RemoveUnusedSymbols();
}
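// Partition() splits a sum of affine terms into a part that depends only on
// thread x (d0) and a part that depends only on the innermost symbol (s0),
// returning nullopt if any single term mixes the two.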
struct PartitionedExpr {
explicit PartitionedExpr(MLIRContext* mlir_context) {
AffineExpr zero = getAffineConstantExpr(0, mlir_context);
func_of_d0 = zero;
func_of_s0 = zero;
}
AffineExpr func_of_d0;
AffineExpr func_of_s0;
};
std::optional<PartitionedExpr> Partition(AffineExpr expr) {
PartitionedExpr result(expr.getContext());
std::vector<AffineExpr> summands;
std::stack<AffineExpr> dfs;
dfs.push(expr);
while (!dfs.empty()) {
auto top = dfs.top();
dfs.pop();
auto sum = mlir::dyn_cast<AffineBinaryOpExpr>(top);
if (sum && sum.getKind() == AffineExprKind::Add) {
dfs.push(sum.getLHS());
dfs.push(sum.getRHS());
continue;
}
bool depends_on_thread_x = top.isFunctionOfDim(0);
bool depends_on_range = top.isFunctionOfSymbol(0);
if (depends_on_thread_x && depends_on_range) {
return std::nullopt;
}
if (depends_on_thread_x) {
result.func_of_d0 = top + result.func_of_d0;
}
if (depends_on_range) {
result.func_of_s0 = top + result.func_of_s0;
}
}
return result;
}
void FindAllIndices(AffineExpr expr, int dim_id, int symbol_id,
const std::vector<Interval>& dimension_ranges,
const std::vector<Interval>& symbol_ranges,
std::vector<int64_t>* dimensions,
std::vector<int64_t>* symbols,
std::vector<int64_t>* indices) {
if (dim_id < dimension_ranges.size()) {
Interval dim_range = dimension_ranges[dim_id];
for (int64_t dim_value = dim_range.lower; dim_value <= dim_range.upper;
++dim_value) {
dimensions->push_back(dim_value);
FindAllIndices(expr, dim_id + 1, symbol_id, dimension_ranges,
symbol_ranges, dimensions, symbols, indices);
dimensions->pop_back();
}
return;
}
if (symbol_id < symbol_ranges.size()) {
Interval symbol_range = symbol_ranges[symbol_id];
for (int64_t symbol_value = symbol_range.lower;
symbol_value <= symbol_range.upper; ++symbol_value) {
symbols->push_back(symbol_value);
FindAllIndices(expr, dim_id, symbol_id + 1, dimension_ranges,
symbol_ranges, dimensions, symbols, indices);
symbols->pop_back();
}
return;
}
indices->push_back(EvaluateAffineExpr(expr, *dimensions, *symbols));
}
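// Evaluates `expr` at every point of the given dimension/symbol ranges and
// merges the resulting indices into sorted, disjoint intervals.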
std::vector<Interval> FindIntervals(
AffineExpr expr, const std::vector<Interval>& dimension_ranges,
const std::vector<Interval>& symbol_ranges = {}) {
std::vector<int64_t> dimensions, symbols;
std::vector<int64_t> linear_indices;
FindAllIndices(expr, 0, 0, dimension_ranges, symbol_ranges, &dimensions,
&symbols, &linear_indices);
std::sort(linear_indices.begin(), linear_indices.end());
linear_indices.erase(
std::unique(linear_indices.begin(), linear_indices.end()),
linear_indices.end());
std::vector<Interval> intervals;
for (int i = 0, start, end; i < linear_indices.size();) {
start = linear_indices[i++];
end = start;
while (i < linear_indices.size() && linear_indices[i] == end + 1) {
++end;
++i;
}
intervals.push_back(Interval{start, end});
}
return intervals;
}
std::vector<Interval> ExtendIntervals(const std::vector<Interval>& intervals,
int64_t length) {
std::vector<Interval> overlapped_intervals;
for (int i = 0; i < intervals.size();) {
int64_t lower = intervals[i].lower;
int64_t upper = intervals[i].upper + length;
++i;
while (i < intervals.size() && upper >= intervals[i].lower - 1) {
upper = std::max(upper, intervals[i].upper + length);
++i;
}
overlapped_intervals.push_back(Interval{lower, upper});
}
return overlapped_intervals;
}
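// Computes the intervals of linearized elements accessed by the first warp
// (32 threads), special-casing the patterns `d0` and `d0 * constant [+ s0]`;
// all other expressions fall back to explicit enumeration.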
std::vector<Interval> FindContiguousIntervals(
const PartitionedExpr& partitioned_expr, const IndexingMap& indexing_map) {
constexpr int64_t kNumThreadsPerWarp = 32;
MLIRContext* mlir_context = indexing_map.GetMLIRContext();
AffineExpr thread_x = mlir::getAffineDimExpr(0, mlir_context);
AffineExpr range = mlir::getAffineSymbolExpr(0, mlir_context);
if (partitioned_expr.func_of_d0 == thread_x) {
return {Interval{0, kNumThreadsPerWarp - 1}};
}
if (auto mul =
mlir::dyn_cast<AffineBinaryOpExpr>(partitioned_expr.func_of_d0);
mul && mul.getKind() == AffineExprKind::Mul) {
if (auto multiplier = mlir::dyn_cast<AffineConstantExpr>(mul.getRHS());
multiplier) {
if (multiplier.getValue() == -1) {
return {Interval{0, kNumThreadsPerWarp - 1}};
}
if (partitioned_expr.func_of_s0 == range) {
Interval range_interval = indexing_map.GetSymbolBound(0);
int64_t num_elems = range_interval.GetLoopTripCount();
if (num_elems >= std::abs(multiplier.getValue())) {
return {Interval{0, multiplier.getValue() * (kNumThreadsPerWarp - 1) +
num_elems - 1}};
}
std::vector<Interval> intervals;
for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
++i, dm += multiplier.getValue()) {
intervals.push_back(
{range_interval.lower + dm, range_interval.upper + dm});
}
return intervals;
}
std::vector<Interval> intervals;
for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
++i, dm += multiplier.getValue()) {
intervals.push_back({dm, dm});
}
return intervals;
}
}
auto intervals = FindIntervals(partitioned_expr.func_of_d0,
{indexing_map.GetDimVars(0).bounds});
if (partitioned_expr.func_of_s0 != range) {
return intervals;
}
Interval range_interval = indexing_map.GetSymbolBound(0);
return ExtendIntervals(intervals, range_interval.GetLoopTripCount() - 1);
}
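// Decides whether reads through `thread_x_to_linearized_input` are coalesced:
// RT vars are pinned to representative values, the map is restricted to the
// first 32 threads, outer loop induction variables are fixed, and the
// resulting element intervals are fed to the memory-transaction estimate.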
bool IsIndexingCoalesced(IndexingMap& thread_x_to_linearized_input,
PrimitiveType element_type) {
if (thread_x_to_linearized_input.IsUndefined()) {
return false;
}
if (thread_x_to_linearized_input.GetAffineMap().getNumResults() == 0) {
return true;
}
AssignValuesToRTVars(&thread_x_to_linearized_input);
MLIRContext* mlir_context = thread_x_to_linearized_input.GetMLIRContext();
AffineExpr thread_x_dim = mlir::getAffineDimExpr(
KernelFusionInterface::kIndexingMapThreadIdxDims[0], mlir_context);
AffineExpr c0 = getAffineConstantExpr(0, mlir_context);
IndexingMap thread_x_first_32_elements{
AffineMap::get(1, 0, {thread_x_dim, c0, c0, c0, c0, c0}, mlir_context),
{IndexingMap::Variable{{0, 31}}},
{},
{}};
IndexingMap thread_x_to_input_sample =
thread_x_first_32_elements * thread_x_to_linearized_input;
thread_x_to_input_sample.Simplify();
thread_x_to_input_sample.RescaleSymbols();
thread_x_to_input_sample.RemoveUnusedSymbols();
if (thread_x_to_input_sample.IsKnownEmpty()) {
return true;
}
AssignValuesToOuterLoopIVs(&thread_x_to_input_sample);
auto partitioned_expr =
Partition(thread_x_to_input_sample.GetAffineMap().getResult(0));
if (!partitioned_expr.has_value()) {
return false;
}
if (thread_x_to_input_sample.GetConstraintsCount() > 1 ||
(thread_x_to_input_sample.GetConstraintsCount() == 1 &&
thread_x_to_input_sample.GetConstraints().begin()->first !=
partitioned_expr->func_of_d0 + partitioned_expr->func_of_s0)) {
return false;
}
return EstimateCoalescingViaMemoryTransactionsCount(
FindContiguousIntervals(*partitioned_expr, thread_x_to_input_sample),
element_type);
}
}
CoalescingAnalysis::CoalescingAnalysis(
const HloInstruction* instr,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
bool use_heuristic) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(instr);
if (!use_heuristic && ComputeCoalescingForAllOperands(
*fusion_adaptor, operands, fusion_analysis,
fusion_interface, mlir_context)) {
return;
}
is_coalesced_computed_by_heuristic_ =
IsReadCoalescedHeuristic(fusion_analysis.GetEmitterFusionKind(), instr);
}
CoalescingAnalysis::CoalescingAnalysis(
const HloInstruction* producer, const HloInstruction* consumer,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
bool use_heuristic) {
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(producer, consumer);
if (!use_heuristic && ComputeCoalescingForAllOperands(
*fusion_adaptor, operands, fusion_analysis,
fusion_interface, mlir_context)) {
return;
}
is_coalesced_computed_by_heuristic_ = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer, consumer);
}
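// Computes exact per-operand coalescing from indexing maps. Returns false if
// the thread ID indexing cannot be derived, in which case the constructors
// fall back to the heuristic.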
bool CoalescingAnalysis::ComputeCoalescingForAllOperands(
const HloFusionAdaptor& fusion_adaptor,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
std::optional<GroupedByOpIndexingMap> thread_id_to_input_memory_layouts =
GetThreadIdToInputMemoryLayoutsMaps(fusion_adaptor, operands,
fusion_analysis, fusion_interface,
mlir_context);
if (!thread_id_to_input_memory_layouts.has_value()) {
return false;
}
for (const HloInstruction* operand : operands) {
if (operand->shape().rank() == 0) {
coalescing_per_operand_.insert({operand, true});
continue;
}
auto operand_indexing_maps =
thread_id_to_input_memory_layouts->find(operand);
if (operand_indexing_maps == thread_id_to_input_memory_layouts->end()) {
coalescing_per_operand_.insert({operand, true});
continue;
}
for (IndexingMap operand_indexing_map : operand_indexing_maps->second) {
bool is_coalesced = IsIndexingCoalesced(operand_indexing_map,
operand->shape().element_type());
auto [it, inserted] =
coalescing_per_operand_.insert({operand, is_coalesced});
if (!inserted) {
it->second &= is_coalesced;
}
if (!is_coalesced) break;
}
}
return true;
}
bool CoalescingAnalysis::IsReadCoalesced(const HloInstruction* operand) const {
auto it = coalescing_per_operand_.find(operand);
if (it == coalescing_per_operand_.end()) {
return is_coalesced_computed_by_heuristic_;
}
return it->second;
}
}
} | #include "xla/service/gpu/model/coalescing_analysis.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class CoalescingTest : public HloTestBase {
public:
std::vector<bool> IsReadCoalescedPerOperand(absl::string_view hlo_string) {
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* root = module->entry_computation()->root_instruction();
return IsReadCoalescedPerOperand(root);
}
std::vector<bool> IsReadCoalescedPerOperand(const HloInstruction* root) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto analysis = HloFusionAnalysis::Create(*root, device_info_);
auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
auto fusion = dynamic_cast<KernelFusionInterface*>(emitter.get());
EXPECT_NE(fusion, nullptr);
CoalescingAnalysis coalescing_analysis(root, root->operands(), analysis,
fusion, &mlir_context_,
                                           /*use_heuristic=*/false);
std::vector<bool> results;
for (const HloInstruction* operand : root->operands()) {
results.push_back(coalescing_analysis.IsReadCoalesced(operand));
}
return results;
}
bool IsReadCoalescedHeuristic(absl::string_view hlo_string) {
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info_);
return xla::gpu::IsReadCoalescedHeuristic(analysis.GetEmitterFusionKind(),
root->operand(0), root);
}
protected:
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
mlir::MLIRContext mlir_context_;
};
TEST_F(CoalescingTest, IdentityLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200] parameter(0)
p1 = f32[100, 200] parameter(1)
      ROOT add = f32[100, 200] add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200] parameter(0)
p1 = f32[100, 200] parameter(1)
ROOT fusion = f32[100, 200] fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
TEST_F(CoalescingTest, RhsTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT exp = f32[100, 200]{1, 0} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
TEST_F(CoalescingTest, OutputTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{1, 0} parameter(1)
ROOT exp = f32[100, 200]{0, 1} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{1, 0} parameter(1)
ROOT fusion = f32[100, 200]{0, 1} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, false));
}
TEST_F(CoalescingTest, OutputAndLhsTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT add = f32[100, 200]{1, 0} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
TEST_F(CoalescingTest, Transpose) {
absl::string_view ir = R"(
HloModule module
fusion {
%input = f32[1, 6400, 32] parameter(0)
ROOT transpose = f32[1, 32, 6400] transpose(%input), dimensions={0, 2, 1}
}
ENTRY entry {
%input = f32[1, 6400, 32] parameter(0)
ROOT %fusion = f32[1, 32, 6400] fusion(%input), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, TransposeOfBroadcastHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
input = f32[1, 32, 6400] parameter(0)
ROOT slice = f32[1, 32, 100] slice(input), slice={[0:1:1], [0:32:1], [0:6400:64]}
}
ENTRY entry {
p0 = f32[32] parameter(0)
broadcast = f32[1, 6400, 32] broadcast(p0), dimensions={2}
transpose = f32[1, 32, 6400] transpose(broadcast), dimensions={0, 2, 1}
ROOT %fusion = f32[1, 32, 100] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOfIotaHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[32, 100, 64] parameter(0)
ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
}
ENTRY entry {
iota = f32[100, 64, 32] iota(), iota_dimension=1
transpose = f32[32, 100, 64] transpose(iota), dimensions={2, 0, 1}
ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOfAddHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[32, 100, 64] parameter(0)
ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
}
ENTRY entry {
input = f32[100, 64, 32] parameter(0)
add = f32[100, 64, 32] add(input, input)
transpose = f32[32, 100, 64] transpose(add), dimensions={2, 0, 1}
ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_FALSE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOnlyOuterDims) {
absl::string_view ir = R"(
HloModule module
fusion {
%input = f32[100, 32, 64] parameter(0)
ROOT transpose = f32[32, 100, 64] transpose(%input), dimensions={1, 0, 2}
}
ENTRY entry {
%input = f32[100, 32, 64] parameter(0)
ROOT %fusion = f32[32, 100, 64] fusion(%input), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, PadOp) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[997, 436] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[1024, 512] pad(p0, p1), padding=10_17x24_52
}
ENTRY entry {
p0 = f32[997, 436] parameter(0)
p1 = f32[] parameter(1)
ROOT %fusion = f32[1024, 512] fusion(p0, p1), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
TEST_F(CoalescingTest, RowReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, MultiRowReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,4] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,4] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, ColumnReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,32] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,32] reduce(%input, %c0),
dimensions={1}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,32] parameter(0)
ROOT %fusion = f32[100,32] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, VariadicReduceViaLoopEmitter) {
absl::string_view ir = R"(
HloModule module
max {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
max01 = s32[] maximum(p0, p1)
max23 = s32[] maximum(p2, p3)
ROOT max = (s32[], s32[]) tuple(max01, max23)
}
fusion {
p0 = s32 [5696,10,4] parameter(0)
p1 = s32 [5696,10,4] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT reduce = (s32[5696,4], s32[5696,4]) reduce(s32[5696,10,4] p0,
s32[5696,10,4] p1, s32[] p2, s32[] p3), dimensions={1}, to_apply=max
}
ENTRY entry {
p0 = s32 [5696,10,4] parameter(0)
p1 = s32 [5696,10,4] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT f = (s32[5696,4], s32[5696,4]) fusion(p0, p1, p2, p3),
kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(false, false, true, true));
}
TEST_F(CoalescingTest, VariadicReduceViaReductionEmitter) {
absl::string_view ir = R"(
HloModule module
max {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
max01 = s32[] maximum(p0, p1)
max23 = s32[] maximum(p2, p3)
ROOT max = (s32[], s32[]) tuple(max01, max23)
}
fusion {
p0 = s32[32,40] parameter(0)
p1 = s32[32,40] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT reduce = (s32[32], s32[32])
reduce(s32[32,40] p0, s32[32,40] p1, s32[] p2, s32[] p3),
dimensions={1}, to_apply=max
}
ENTRY entry {
p0 = s32[32,40] parameter(0)
p1 = s32[32,40] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT f = (s32[32], s32[32]) fusion(p0, p1, p2, p3),
kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(true, true, true, true));
}
TEST_F(CoalescingTest, Gather) {
absl::string_view ir = R"(
HloModule module
fusion {
operand = f32[33, 76, 70] parameter(0)
indices = s32[1806, 2] parameter(1)
ROOT gather = f32[1806, 7, 8, 4] gather(operand, indices),
offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
ENTRY entry {
p0 = f32[33, 76, 70] parameter(0)
p1 = s32[1806, 2] parameter(1)
ROOT %fusion = f32[1806, 7, 8, 4] fusion(p0, p1), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, true));
}
TEST_F(CoalescingTest, DynamicSlice) {
absl::string_view ir = R"(
HloModule module
fusion {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
ENTRY entry {
%p0 = s32[2,2,258] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[] parameter(2)
%p3 = s32[] parameter(3)
ROOT %fusion = s32[1,2,32] fusion(p0, p1, p2, p3), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(true, true, true, true));
}
TEST_F(CoalescingTest, UnusedParameter) {
Shape shape = ShapeUtil::MakeShape(F32, {100000});
auto module = std::make_unique<HloModule>("m", HloModuleConfig{});
HloComputation::Builder b("b");
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloComputation::Builder sub_builder("subcomp");
HloInstruction* p0f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0f"));
HloInstruction* p1f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1f"));
ASSERT_NE(p1f, nullptr);
sub_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0f));
HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build());
auto fusion = HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0, p1}, subcomp);
b.AddInstruction(std::move(fusion));
module->AddEntryComputation(b.Build());
EXPECT_THAT(IsReadCoalescedPerOperand(
module->entry_computation()->root_instruction()),
ElementsAre(true, true));
}
TEST_F(CoalescingTest, Param) {
absl::string_view ir = R"(
HloModule module
fusion {
%p0 = u32[48,2,1280] parameter(0)
%p1 = u32[48,1,1280] parameter(1)
%p2 = u32[48,1,1280] parameter(2)
%concat = u32[48,2,1280] concatenate(u32[48,1,1280] %p1,
u32[48,1,1280] %p2), dimensions={1}
ROOT %shift = u32[48,2,1280] shift-right-logical(
u32[48,2,1280] %concat, u32[48,2,1280] %p0)
}
ENTRY entry {
%p0 = u32[48,2,1280] parameter(0)
%p1 = u32[48,1,1280] parameter(1)
%p2 = u32[48,1,1280] parameter(2)
ROOT %fusion = u32[48,2,1280] fusion(p0, p1, p2), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true, true));
}
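// Tiles the fusion rooted at `root` with the given tile sizes via
// SymbolicTileAnalysis and evaluates the tiled coalescing heuristic for every
// operand of the tiled root.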
class CoalescingForTiledHloTest : public CoalescingTest {
public:
std::vector<bool> IsTiledReadCoalescedPerOperand(
const HloInstruction* root, absl::Span<int64_t const> tile_sizes) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
SymbolicTileAnalysis symbolic_tile_analysis =
std::get<SymbolicTileAnalysis>(SymbolicTileAnalysis::AnalyzeFusion(
*fusion_adaptor, &mlir_context_));
TiledHloComputation tiled_hlo_computation =
*symbolic_tile_analysis.ComputeTiledHloInstructions(
            tile_sizes, /*constraints_are_known_satisfied=*/true,
            /*compute_all_tile_offset_indexing_maps=*/true);
const TiledHloInstruction* tiled_hlo_root = tiled_hlo_computation.GetRoot();
std::vector<bool> result;
for (const TiledHloInstruction* operand : tiled_hlo_root->operands()) {
result.push_back(IsTiledReadCoalescedHeuristic(*operand, device_info_));
}
return result;
}
};
TEST_F(CoalescingForTiledHloTest, TiledReadCoalescedHeuristic_Transpose) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
p0 = f32[2048, 48] parameter(0)
ROOT transpose = f32[48, 2048] transpose(p0), dimensions={1, 0}
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1, 2048}),
ElementsAre(false));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {48, 32}),
ElementsAre(true));
}
TEST_F(CoalescingForTiledHloTest,
TiledReadCoalescedHeuristic_MaskingIsHandledCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
p0 = f32[2048, 12] parameter(0)
ROOT transpose = f32[12, 2048] transpose(p0), dimensions={1, 0}
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
constexpr int kNumBytesPerParamRow = 12 * 4;
ASSERT_GT(device_info_.dram_to_l2_transaction_size_bytes(),
kNumBytesPerParamRow);
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 4}), ElementsAre(true));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1024, 1}),
ElementsAre(false));
}
TEST_F(CoalescingForTiledHloTest, RhsTransposedLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
p0 = f32[256, 512]{1,0} parameter(0)
p1 = f32[256, 512]{0,1} parameter(1)
ROOT add = f32[256, 512]{1,0} add(p0, p1)
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
constexpr int kExpectedDramToL2TransactionSize = 64;
ASSERT_EQ(device_info_.dram_to_l2_transaction_size_bytes(),
kExpectedDramToL2TransactionSize);
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {1, 16}),
ElementsAre(true, false));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 1}),
ElementsAre(false, true));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 16}),
ElementsAre(true, true));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {8, 8}),
ElementsAre(false, false));
}
TEST_F(CoalescingForTiledHloTest, SmallDataTypes) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY main {
p0 = s8[256, 512] parameter(0)
p1 = s8[256, 512] parameter(1)
ROOT add = s8[256, 512] add(p0, p1)
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
constexpr int kExpectedDramToL2TransactionSize = 64;
ASSERT_EQ(device_info_.dram_to_l2_transaction_size_bytes(),
kExpectedDramToL2TransactionSize);
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 16}),
ElementsAre(false, false));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 32}),
ElementsAre(false, false));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 64}),
ElementsAre(true, true));
EXPECT_THAT(IsTiledReadCoalescedPerOperand(root, {16, 128}),
ElementsAre(true, true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/coalescing_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/coalescing_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55e09b08-062c-4740-9cea-c3c6d2b6afbd | cpp | tensorflow/tensorflow | symbolic_tile_analysis | third_party/xla/xla/service/gpu/model/symbolic_tile_analysis.cc | third_party/xla/xla/service/gpu/model/symbolic_tile_analysis_test.cc | #include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::MLIRContext;
struct OutputTilingInfo {
llvm::SmallVector<int64_t> num_output_tiles_per_dim;
IndexingMap output_tile_offset_indexing;
};
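// Computes the number of output tiles per dimension and an indexing map from
// the tile multi-index to the offsets of that tile in the root's output.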
OutputTilingInfo ComputeOutputTilingInfo(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> tile_sizes,
mlir::MLIRContext* mlir_context) {
CHECK_EQ(dimensions.size(), tile_sizes.size());
int64_t num_tiles = 1;
llvm::SmallVector<int64_t> outer_loop_bounds;
outer_loop_bounds.reserve(dimensions.size());
for (auto [dim_size, tile_size] : llvm::zip(dimensions, tile_sizes)) {
int64_t num_tiles_per_dim = (dim_size + tile_size - 1) / tile_size;
num_tiles *= num_tiles_per_dim;
outer_loop_bounds.push_back(num_tiles_per_dim);
}
llvm::SmallVector<AffineExpr> tiled_dims;
tiled_dims.reserve(dimensions.size());
for (auto [dim_id, tile_size] : llvm::enumerate(tile_sizes)) {
tiled_dims.push_back(tile_size *
mlir::getAffineDimExpr(dim_id, mlir_context));
}
IndexingMap output_tile_offset_indexing = IndexingMap::FromTensorSizes(
mlir::AffineMap::get(
          dimensions.size(), /*symbolCount=*/0, tiled_dims,
mlir_context),
outer_loop_bounds, {});
return {outer_loop_bounds, output_tile_offset_indexing};
}
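// Composes the root tile-offset indexing with the instruction's indexing map
// and pins every range symbol to its lower bound (which must be 0), yielding
// a map from the tile multi-index to this instruction's tile offsets. RT vars
// are preserved as symbols.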
absl::StatusOr<IndexingMap> ComputeTileOffsetIndexing(
const SymbolicTiledHloInstruction& tiled_hlo,
const IndexingMap& output_tile_offset_indexing,
mlir::MLIRContext* mlir_context) {
IndexingMap tile_offset_indexing = ComposeIndexingMaps(
output_tile_offset_indexing, tiled_hlo.indexing_map());
if (absl::c_any_of(tile_offset_indexing.GetSymbolBounds(),
[](const Interval& symbol_bound) {
return symbol_bound.lower != 0;
})) {
return absl::FailedPreconditionError(
absl::StrCat("Symbol lower bound is not zero. ",
ToString(tiled_hlo.indexing_map())));
}
std::vector<AffineExpr> symbol_lower_bounds(
tile_offset_indexing.GetRangeVarsCount(),
mlir::getAffineConstantExpr(0, mlir_context));
symbol_lower_bounds.reserve(tile_offset_indexing.GetSymbolCount());
for (int i = 0; i < tile_offset_indexing.GetRTVarsCount(); ++i) {
symbol_lower_bounds.push_back(mlir::getAffineSymbolExpr(i, mlir_context));
}
mlir::AffineMap simplified_affine_map =
tile_offset_indexing.GetAffineMap().replaceDimsAndSymbols(
{},
symbol_lower_bounds,
tile_offset_indexing.GetDimVarsCount(),
tile_offset_indexing.GetRTVarsCount());
IndexingMap simplified_indexing_map =
IndexingMap{simplified_affine_map, tile_offset_indexing.GetDimVars(),
{}, tile_offset_indexing.GetRTVars()};
simplified_indexing_map.Simplify();
simplified_indexing_map.RescaleSymbols();
simplified_indexing_map.RemoveUnusedSymbols();
return simplified_indexing_map;
}
template <typename T>
class OrderedUniquePtrValueHashSet {
public:
std::pair<T*, bool> Insert(std::unique_ptr<T> elem) {
auto [it, inserted] = hash_set_.insert(elem.get());
if (inserted) {
data_.push_back(std::move(elem));
}
return {*it, inserted};
}
void Reserve(int64_t n) {
hash_set_.reserve(n);
data_.reserve(n);
}
std::vector<std::unique_ptr<T>> ExtractData() { return std::move(data_); }
private:
struct PtrHash {
size_t operator()(const T* v) const { return absl::HashOf(*v); }
};
struct PtrEqual {
bool operator()(const T* lhs, const T* rhs) const {
return lhs == rhs || *lhs == *rhs;
}
};
absl::flat_hash_set<T*, PtrHash, PtrEqual> hash_set_;
std::vector<std::unique_ptr<T>> data_;
};
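// Pre-checks whether symbolic tile derivation should even be attempted:
// concatenates are rejected outright, and reshapes/bitcasts are rejected when
// their own output-to-input indexing does not admit a symbolic tile.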
FusionDecision ShouldProceedWithSymbolicTileDerivation(
const SymbolicTiledHloInstruction& tiled_hlo_instruction) {
const HloInstruction* hlo = tiled_hlo_instruction.hlo();
const IndexingMap& indexing_map = tiled_hlo_instruction.indexing_map();
if (hlo->opcode() == HloOpcode::kConcatenate) {
return FusionDecision::Forbid("Bailing out on ") << hlo->ToString();
}
if (hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast) {
mlir::MLIRContext* ctx = indexing_map.GetMLIRContext();
IndexingMap reshape_indexing_map =
        *ComputeOutputToInputIndexing(hlo, /*output_id=*/0, ctx)
.indexing_maps[0]
.begin();
std::optional<SymbolicTile> reshape_symbolic_tile =
SymbolicTile::FromIndexingMap(reshape_indexing_map);
if (!reshape_symbolic_tile.has_value()) {
return FusionDecision::Forbid("Bailing out on reshape ")
<< hlo->ToString() << " with indexing map "
<< ToString(reshape_indexing_map);
}
}
return FusionDecision::Allow();
}
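// Derives a symbolic tile for every tiled instruction and accumulates the
// conjunction of their constraints. Returns a FusionDecision describing the
// failure if a tile cannot be derived or the constraints become
// unsatisfiable.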
std::variant<ConstraintExpression, FusionDecision>
SetSymbolicTilesAndComputeConstraints(
std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
tiled_hlo_instructions,
const HloFusionAdaptor& fusion_adaptor) {
ConstraintExpression constraints;
for (const std::unique_ptr<SymbolicTiledHloInstruction>&
tiled_hlo_instruction : tiled_hlo_instructions) {
const HloInstruction* hlo = tiled_hlo_instruction->hlo();
const IndexingMap& indexing_map = tiled_hlo_instruction->indexing_map();
if (fusion_adaptor.ContainsInstruction(hlo)) {
FusionDecision should_proceed =
ShouldProceedWithSymbolicTileDerivation(*tiled_hlo_instruction);
if (!should_proceed) {
return should_proceed;
}
}
auto symbolic_tile = SymbolicTile::FromIndexingMap(indexing_map);
if (!symbolic_tile.has_value()) {
return FusionDecision::Forbid("Failed to compute symbolic tile for ")
<< ToString(indexing_map) << " for HLO " << hlo->ToString();
}
if (!symbolic_tile->is_satisfiable()) {
return FusionDecision::Forbid("Symbolic tile ")
<< symbolic_tile->ToString() << " is not satisfiable for "
<< ToString(indexing_map) << " for HLO " << hlo->ToString();
}
constraints = ConstraintExpression::And(std::move(constraints),
symbolic_tile->constraints());
constraints.Simplify();
if (!constraints.is_satisfiable()) {
return FusionDecision::Forbid("Fusion has unsatisfiable constraints");
}
tiled_hlo_instruction->set_symbolic_tile(*std::move(symbolic_tile));
}
return constraints;
}
void SortTiledHloInstructionsInPostOrder(
std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
tiled_hlo_instructions,
const SymbolicTiledHloInstruction* root_tiled_hlo) {
absl::flat_hash_map<const SymbolicTiledHloInstruction*, int64_t>
topological_order;
std::function<void(const SymbolicTiledHloInstruction*)> visit_instruction;
visit_instruction = [&](const SymbolicTiledHloInstruction* instruction) {
if (topological_order.contains(instruction)) {
return;
}
for (const SymbolicTiledHloInstruction* operand : instruction->operands()) {
visit_instruction(operand);
}
topological_order[instruction] = topological_order.size();
};
visit_instruction(root_tiled_hlo);
absl::c_sort(tiled_hlo_instructions,
[&](const std::unique_ptr<SymbolicTiledHloInstruction>& t1,
const std::unique_ptr<SymbolicTiledHloInstruction>& t2) {
return topological_order.at(t1.get()) <
topological_order.at(t2.get());
});
}
}
SymbolicTileAnalysisOrError SymbolicTileAnalysis::AnalyzeComputation(
const HloComputation& computation, MLIRContext* ctx,
EmitterSpecificConstraintsBuilder emitter_specific_constraints_builder) {
auto fusion = HloFusionAdaptor::ForComputation(&computation);
return SymbolicTileAnalysis::AnalyzeFusion(
*fusion, ctx, emitter_specific_constraints_builder);
}
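// Walks the fusion from its single root (multi-output fusions are rejected),
// composing indexing maps down to every operand and deduplicating the
// resulting tiled instructions, then derives symbolic tiles and constraints
// for all of them.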
SymbolicTileAnalysisOrError SymbolicTileAnalysis::AnalyzeFusion(
const HloFusionAdaptor& fusion, MLIRContext* ctx,
EmitterSpecificConstraintsBuilder emitter_specific_constraints_builder) {
OrderedUniquePtrValueHashSet<SymbolicTiledHloInstruction>
tiled_hlo_instructions_set;
auto roots = fusion.GetRoots();
if (roots.size() > 1) {
return FusionDecision::Forbid("Multi-output fusions are not supported. ")
<< fusion.ToString();
}
auto& root = roots[0];
auto [root_tiled_hlo, _] = tiled_hlo_instructions_set.Insert(
std::make_unique<SymbolicTiledHloInstruction>(
&root.instruction(), CreateIdentityMap(root.shape(), ctx)));
std::vector<SymbolicTiledHloInstruction*> worklist = {root_tiled_hlo};
while (!worklist.empty()) {
auto tiled_hlo_instruction = worklist.back();
worklist.pop_back();
HloInstructionAdaptor instruction_adaptor(*tiled_hlo_instruction->hlo(),
&fusion);
if (!fusion.ContainsInstruction(instruction_adaptor)) {
continue;
}
HloInstructionIndexing operands_indexing =
ComputeOutputToInputIndexing(tiled_hlo_instruction->hlo(),
                                     /*output_id=*/0, ctx);
for (auto [operand, operand_indexing_map_set] :
llvm::zip(instruction_adaptor.GetOperands(),
operands_indexing.indexing_maps)) {
CHECK_EQ(operand_indexing_map_set.size(), 1);
IndexingMap operand_indexing_map =
ComposeIndexingMaps(tiled_hlo_instruction->indexing_map(),
*operand_indexing_map_set.begin());
if (operand_indexing_map.IsUndefined()) {
return FusionDecision::Forbid(
"Couldn't derive indexing map for instruction ")
<< tiled_hlo_instruction->hlo()->ToString() << " and operand "
<< operand.instruction().ToString();
}
operand_indexing_map.Simplify();
operand_indexing_map.RescaleSymbols();
operand_indexing_map.RemoveUnusedSymbols();
auto [operand_tiled_hlo, inserted] = tiled_hlo_instructions_set.Insert(
std::make_unique<SymbolicTiledHloInstruction>(
&operand.instruction(), std::move(operand_indexing_map)));
tiled_hlo_instruction->AppendOperand(operand_tiled_hlo);
if (inserted) {
worklist.push_back(operand_tiled_hlo);
}
}
}
std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>
tiled_hlo_instructions = tiled_hlo_instructions_set.ExtractData();
SortTiledHloInstructionsInPostOrder(tiled_hlo_instructions, root_tiled_hlo);
std::variant<ConstraintExpression, FusionDecision> constraints_or =
SetSymbolicTilesAndComputeConstraints(tiled_hlo_instructions, fusion);
if (std::holds_alternative<FusionDecision>(constraints_or)) {
return std::get<FusionDecision>(constraints_or);
}
std::unique_ptr<EmitterSpecificConstraints> emitter_specific_constraints;
if (emitter_specific_constraints_builder != nullptr) {
emitter_specific_constraints =
emitter_specific_constraints_builder(tiled_hlo_instructions, fusion);
}
return SymbolicTileAnalysis(
std::move(tiled_hlo_instructions),
std::get<ConstraintExpression>(std::move(constraints_or)),
std::move(emitter_specific_constraints), ctx);
}
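// Validates a concrete choice of tile sizes against the emitter-specific
// constraints (if any) and the constraint expression collected from the
// symbolic tiles.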
absl::StatusOr<bool> SymbolicTileAnalysis::ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const {
if (!constraints_.is_satisfiable()) {
return absl::FailedPreconditionError(
"SymbolicTileAnalysis's constraints are not satisfiable. "
"This should never happen.");
}
if (tile_parameters.size() != num_tile_parameters()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Failed to check if tile parameters satisfy constraints. Number of "
"provided parameters doesn't match number of expected parameters "
"(%d != %d)",
tile_parameters.size(), num_tile_parameters()));
}
if (emitter_specific_constraints_ != nullptr) {
TF_ASSIGN_OR_RETURN(
bool constraints_are_satisfied,
emitter_specific_constraints_->ParametersSatisfyConstraints(
tile_parameters));
if (!constraints_are_satisfied) {
return false;
}
}
return constraints_.IsSatisfiedBy(tile_parameters);
}
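// Materializes concrete TiledHloInstructions for the given tile sizes. Tile
// offset indexing maps are computed only where needed (iotas, and operand-less
// instructions that appear more than once with identical tile sizes) unless
// compute_all_tile_offset_indexing_maps is set.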
absl::StatusOr<TiledHloComputation>
SymbolicTileAnalysis::ComputeTiledHloInstructions(
absl::Span<const int64_t> tile_parameters,
bool constraints_are_known_satisfied,
bool compute_all_tile_offset_indexing_maps) const {
if (!constraints_are_known_satisfied) {
TF_ASSIGN_OR_RETURN(bool constraints_are_satisfied,
ParametersSatisfyConstraints(tile_parameters));
if (!constraints_are_satisfied) {
return absl::InvalidArgumentError(
absl::StrCat("Tile parameters ", absl::StrJoin(tile_parameters, ", "),
" do not satisfy constraints."));
}
}
llvm::SmallPtrSet<const HloInstruction*, 8> parameters_with_offset_indexing;
absl::flat_hash_map<const SymbolicTiledHloInstruction*,
llvm::SmallVector<int64_t>>
tile_sizes_map;
if (!compute_all_tile_offset_indexing_maps) {
absl::flat_hash_set<size_t> hashes;
for (const std::unique_ptr<SymbolicTiledHloInstruction>&
symbolic_tiled_hlo : symbolic_tiled_hlo_instructions_) {
if (!symbolic_tiled_hlo->operands().empty()) {
continue;
}
llvm::SmallVector<int64_t> tile_sizes =
symbolic_tiled_hlo->TileSizes(tile_parameters);
size_t hash_value = absl::HashOf(symbolic_tiled_hlo->hlo(),
absl::Span<const int64_t>(tile_sizes));
tile_sizes_map.emplace(symbolic_tiled_hlo.get(), std::move(tile_sizes));
auto [it, inserted] = hashes.insert(hash_value);
if (!inserted) {
parameters_with_offset_indexing.insert(symbolic_tiled_hlo->hlo());
}
}
}
OutputTilingInfo output_tiling_info = ComputeOutputTilingInfo(
GetRoot()->hlo()->shape().dimensions(), tile_parameters, context_);
OrderedUniquePtrValueHashSet<TiledHloInstruction> tiled_hlo_instructions_set;
absl::flat_hash_map<const SymbolicTiledHloInstruction*, TiledHloInstruction*>
symbolic_to_tiled_hlo_map;
tiled_hlo_instructions_set.Reserve(symbolic_tiled_hlo_instructions_.size());
std::function<absl::StatusOr<TiledHloInstruction*>(
const SymbolicTiledHloInstruction*)>
get_tiled_hlo_instruction;
for (const std::unique_ptr<SymbolicTiledHloInstruction>& symbolic_tiled_hlo :
symbolic_tiled_hlo_instructions_) {
llvm::SmallVector<int64_t> tile_sizes;
auto it = tile_sizes_map.find(symbolic_tiled_hlo.get());
if (it != tile_sizes_map.end()) {
tile_sizes = it->second;
} else {
tile_sizes = symbolic_tiled_hlo->TileSizes(tile_parameters);
}
llvm::SmallVector<int64_t> tile_strides =
symbolic_tiled_hlo->TileStrides(tile_parameters);
std::optional<IndexingMap> tile_offset_indexing;
if (compute_all_tile_offset_indexing_maps ||
parameters_with_offset_indexing.contains(symbolic_tiled_hlo->hlo()) ||
symbolic_tiled_hlo->hlo()->opcode() == HloOpcode::kIota) {
TF_ASSIGN_OR_RETURN(
tile_offset_indexing,
ComputeTileOffsetIndexing(
*symbolic_tiled_hlo,
output_tiling_info.output_tile_offset_indexing, context_));
}
llvm::SmallVector<const TiledHloInstruction*> operands;
for (const SymbolicTiledHloInstruction* operand :
symbolic_tiled_hlo->operands()) {
operands.push_back(symbolic_to_tiled_hlo_map.at(operand));
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<TiledHloInstruction> tiled_hlo_holder,
TiledHloInstruction::Create(
symbolic_tiled_hlo->hlo(), std::move(operands),
std::move(tile_sizes), std::move(tile_strides),
std::move(tile_offset_indexing)));
auto [tiled_hlo, inserted] =
tiled_hlo_instructions_set.Insert(std::move(tiled_hlo_holder));
symbolic_to_tiled_hlo_map[symbolic_tiled_hlo.get()] = tiled_hlo;
}
return TiledHloComputation::FromSortedTiledHloInstructions(
tiled_hlo_instructions_set.ExtractData(),
output_tiling_info.num_output_tiles_per_dim);
}
std::string SymbolicTileAnalysis::ToString() const {
std::stringstream ss;
NameUniquer name_uniquer("_");
absl::flat_hash_map<SymbolicTiledHloInstruction*, std::string> tile_names;
for (const auto& tiled_hlo : symbolic_tiled_hlo_instructions_) {
std::string tile_name = name_uniquer.GetUniqueName(
absl::StrCat(tiled_hlo->hlo()->name(), ".tile_0"));
tile_names[tiled_hlo.get()] = tile_name;
absl::InlinedVector<std::string, 4> operand_names;
for (const auto& operand : tiled_hlo->operands()) {
operand_names.push_back(tile_names.at(operand));
}
ss << tile_name << " = " << HloOpcodeString(tiled_hlo->hlo()->opcode())
<< "(" << absl::StrJoin(operand_names, ", ") << ")\n";
ss << tiled_hlo->ToString();
}
return ss.str();
}
namespace {
std::vector<int64_t> PossibleTileSizesForOneDimension(int64_t dim_size) {
CHECK_GE(dim_size, 1);
std::vector<int64_t> result;
result.reserve(absl::bit_width(static_cast<uint64_t>(dim_size)));
for (int64_t tile_size = 1; tile_size < dim_size; tile_size *= 2) {
result.push_back(tile_size);
}
result.push_back(dim_size);
return result;
}
}  // namespace
namespace detail {
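// Builds the Cartesian product of the per-dimension candidate tile sizes and
// keeps only the tilings accepted by `is_valid`.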
std::vector<SymbolicTileAnalysis::Tiling> GetGoodTilings(
absl::Span<const int64_t> dim_sizes,
std::function<bool(absl::Span<const int64_t>)> is_valid) {
CHECK(is_valid != nullptr);
std::vector<SymbolicTileAnalysis::Tiling> tilings;
tilings.push_back({});
for (int dim_size : dim_sizes) {
std::vector<int64_t> possible_tile_sizes =
PossibleTileSizesForOneDimension(dim_size);
std::vector<SymbolicTileAnalysis::Tiling> extended_tilings;
extended_tilings.reserve(tilings.size() * possible_tile_sizes.size());
for (const SymbolicTileAnalysis::Tiling& tiling : tilings) {
for (int64_t tile_size : possible_tile_sizes) {
SymbolicTileAnalysis::Tiling extended_tiling = tiling;
extended_tiling.push_back(tile_size);
extended_tilings.push_back(extended_tiling);
}
}
tilings = std::move(extended_tilings);
}
tilings.erase(
std::remove_if(tilings.begin(), tilings.end(), std::not_fn(is_valid)),
tilings.end());
return tilings;
}
}  // namespace detail
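// Returns all tilings of the root instruction's shape whose tile sizes
// satisfy the analysis constraints.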
absl::StatusOr<std::vector<SymbolicTileAnalysis::Tiling>>
SymbolicTileAnalysis::GetGoodTilings() const {
TF_RET_CHECK(!symbolic_tiled_hlo_instructions_.empty());
TF_RET_CHECK(symbolic_tiled_hlo_instructions_.back() != nullptr);
const SymbolicTiledHloInstruction& instr =
*symbolic_tiled_hlo_instructions_.back();
TF_RET_CHECK(instr.hlo() != nullptr);
const Shape& shape = instr.hlo()->shape();
if (!absl::c_all_of(shape.dimensions(),
[](int64_t dim_size) { return dim_size >= 1; })) {
return absl::InvalidArgumentError(absl::StrFormat(
"Shape %s has zero or negative dimensions.", shape.ToString()));
}
absl::Status status = absl::OkStatus();
std::vector<SymbolicTileAnalysis::Tiling> result = detail::GetGoodTilings(
shape.dimensions(), [&](absl::Span<const int64_t> tile_sizes) {
absl::StatusOr<bool> is_valid =
ParametersSatisfyConstraints(tile_sizes);
if (!is_valid.ok()) {
status = is_valid.status();
return false;
}
return is_valid.value();
});
if (status.ok()) {
return result;
}
return status;
}
}  // namespace gpu
} | #include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using detail::GetGoodTilings;
using ::testing::ElementsAreArray;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Matcher;
using ::testing::Not;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
using TilingVector = std::vector<SymbolicTileAnalysis::Tiling>;
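// Matches a TiledHloInstruction by its tile sizes, tile strides, and the
// string representation of its tile offsets indexing map.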
MATCHER_P3(MatchTiledHloInstructionImpl, tile_sizes, tile_strides,
tile_offsets_indexing, "") {
return ExplainMatchResult(ElementsAreArray(tile_sizes), arg.tile_sizes(),
result_listener) &&
ExplainMatchResult(ElementsAreArray(tile_strides), arg.tile_strides(),
result_listener) &&
ExplainMatchResult(
IsOkAndHolds(MatchIndexingMap(tile_offsets_indexing)),
arg.tile_offsets_indexing(), result_listener);
}
Matcher<const TiledHloInstruction> MatchTiledHloInstruction(
absl::Span<const int64_t> tile_sizes,
absl::Span<const int64_t> tile_strides,
absl::string_view tile_offsets_indexing) {
return MatchTiledHloInstructionImpl(tile_sizes, tile_strides,
tile_offsets_indexing);
}
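// Fake EmitterSpecificConstraints implementation that accepts only one
// specific tile size for the first dimension of the root.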
class FakeEmitterSpecificConstraints : public EmitterSpecificConstraints {
public:
absl::StatusOr<bool> ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const override {
return tile_parameters[0] == dim0_tile_size_;
}
static EmitterSpecificConstraintsBuilder GetBuilder() {
return [](const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
instructions,
const HloFusionAdaptor&) {
const SymbolicTiledHloInstruction* root = instructions[0].get();
int64_t dim0_size = root->hlo()->shape().dimensions(0);
return std::make_unique<FakeEmitterSpecificConstraints>(
dim0_size / 2);
};
}
explicit FakeEmitterSpecificConstraints(int64_t dim0_tile_size)
: dim0_tile_size_(dim0_tile_size) {}
private:
int64_t dim0_tile_size_;
};
class SymbolicTileAnalysisTest : public HloTestBase {
public:
std::optional<SymbolicTileAnalysis> TryAnalyzeModule(
HloModule* module,
EmitterSpecificConstraintsBuilder emitter_specific_constraints_builder =
nullptr) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*module->entry_computation()
->root_instruction()
->fused_instructions_computation(),
&mlir_context_, emitter_specific_constraints_builder);
if (std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)) {
return std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
}
VLOG(1) << "Cannot analyze module: "
<< std::get<FusionDecision>(analysis_or_error).Explain();
return std::nullopt;
}
mlir::MLIRContext mlir_context_;
};
TEST_F(SymbolicTileAnalysisTest, SimpleNormalizationDiamondIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
max {
p1 = f32[] parameter(1)
p0 = f32[] parameter(0)
ROOT m = f32[] maximum(p0, p1)
}
fusion {
p0 = f32[2,97]{1,0} parameter(0)
constant = f32[] constant(-inf)
reduce = f32[2] reduce(p0, constant), dimensions={1}, to_apply=max
broadcast = f32[2,97]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = f32[2,97]{1,0} subtract(p0, broadcast)
}
ENTRY main {
p0 = f32[2,97]{1,0} parameter(0)
ROOT fusion = f32[2,97]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{1, 10},
false,
true));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
EXPECT_THAT(*root, MatchTiledHloInstruction({1, 10},
{1, 1},
R"(
(d0, d1) -> (d0, d1 * 10),
domain:
d0 in [0, 1],
d1 in [0, 9]
)"));
auto p0_from_subtract0 = root->operand(0);
auto p0_from_subtract1 = root->operand(1)->operand(0)->operand(0);
EXPECT_THAT(*p0_from_subtract0, MatchTiledHloInstruction(
{1, 10},
{1, 1},
R"(
(d0, d1) -> (d0, d1 * 10),
domain:
d0 in [0, 1],
d1 in [0, 9]
)"));
EXPECT_THAT(*p0_from_subtract1, MatchTiledHloInstruction(
{1, 97},
{1, 1},
R"(
(d0, d1) -> (d0, 0),
domain:
d0 in [0, 1],
d1 in [0, 9]
)"));
}
TEST_F(SymbolicTileAnalysisTest, ElementwiseDiamondCSEIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[2,97] parameter(0)
exp = f32[2,97] exponential(p0)
log = f32[2,97] log(p0)
ROOT subtract = f32[2,97] subtract(exp, log)
}
ENTRY main {
p0 = f32[2,97] parameter(0)
ROOT fusion = f32[2,97] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({1, 10}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
auto p0_from_subtract0 = root->operand(0)->operand(0);
auto p0_from_subtract1 = root->operand(1)->operand(0);
EXPECT_EQ(p0_from_subtract0, p0_from_subtract1);
}
TEST_F(SymbolicTileAnalysisTest, ProducerConsumerFusionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT m = f32[] maximum(p0, p1)
}
fusion.1 {
p0 = f32[2,97] parameter(0)
constant = f32[] constant(-inf)
exp = f32[2,97] exponential(p0)
ROOT reduce = f32[2] reduce(exp, constant), dimensions={1}, to_apply=max
}
fusion.2 {
p0 = f32[2] parameter(0)
p1 = f32[2,97] parameter(1)
broadcast = f32[2,97]{1,0} broadcast(p0), dimensions={0}
ROOT subtract = f32[2,97] subtract(p1, broadcast)
}
ENTRY main {
p0 = f32[2,97] parameter(0)
producer = f32[2] fusion(p0), kind=kLoop, calls=fusion.1
ROOT consumer = f32[2,97] fusion(producer, p0), kind=kLoop, calls=fusion.2
})"));
const auto* consumer = module->entry_computation()->root_instruction();
const auto* producer = consumer->operand(0);
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(*fusion, &mlir_context_);
ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error));
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions({1, 97}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
const TiledHloInstruction* p0_from_producer =
root->operand(1)->operand(0)->operand(0)->operand(0);
const TiledHloInstruction* p0_from_consumer = root->operand(0);
EXPECT_EQ(p0_from_producer, p0_from_consumer);
EXPECT_THAT(*p0_from_producer,
MatchTiledHloInstruction(
{1, 97}, {1, 1},
R"(
(d0, d1) -> (d0, 0),
domain:
d0 in [0, 1],
d1 in [0, 0]
)"));
}
TEST_F(SymbolicTileAnalysisTest, TransposeOffsetIndexingIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[8,16,4] parameter(0)
ROOT transpose = f32[4,8,16] transpose(p0), dimensions={2,0,1}
}
ENTRY main {
p0 = f32[8,16,4] parameter(0)
ROOT fusion = f32[4,8,16] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{2, 4, 2},
false,
true));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
EXPECT_THAT(*root, MatchTiledHloInstruction(
{2, 4, 2}, {1, 1, 1},
R"(
(d0, d1, d2) -> (d0 * 2, d1 * 4, d2 * 2),
domain:
d0 in [0, 1],
d1 in [0, 1],
d2 in [0, 7]
)"));
EXPECT_THAT(*root->operand(0),
MatchTiledHloInstruction(
{4, 2, 2}, {1, 1, 1},
R"(
(d0, d1, d2) -> (d1 * 4, d2 * 2, d0 * 2),
domain:
d0 in [0, 1],
d1 in [0, 1],
d2 in [0, 7]
)"));
}
TEST_F(SymbolicTileAnalysisTest, SliceOffsetIndexingIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[8,16] parameter(0)
slice.0 = f32[4,8] slice(p0), slice={[0:4], [2:10]}
slice.1 = f32[4,8] slice(p0), slice={[3:7], [4:12]}
ROOT add = f32[4,8] add(slice.0, slice.1)
}
ENTRY main {
p0 = f32[8,16] parameter(0)
ROOT fusion = f32[4,8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{2, 2},
false,
true));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
const TiledHloInstruction* p0_from_slice0 = root->operand(0)->operand(0);
const TiledHloInstruction* p0_from_slice1 = root->operand(1)->operand(0);
EXPECT_THAT(*root, MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0, d1) -> (d0 * 2, d1 * 2),
domain:
d0 in [0, 1],
d1 in [0, 3]
)"));
EXPECT_THAT(*p0_from_slice0,
MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0, d1) -> (d0 * 2, d1 * 2 + 2),
domain:
d0 in [0, 1],
d1 in [0, 3]
)"));
EXPECT_THAT(*p0_from_slice1,
MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0, d1) -> (d0 * 2 + 3, d1 * 2 + 4),
domain:
d0 in [0, 1],
d1 in [0, 3]
)"));
}
TEST_F(SymbolicTileAnalysisTest, DotOffsetIndexingIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[4,8] parameter(0)
p1 = f32[8,16] parameter(1)
ROOT dot = f32[4,16] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
p0 = f32[4,8] parameter(0)
p1 = f32[8,16] parameter(1)
ROOT fusion = f32[4,16] fusion(p0, p1), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{2, 2},
false,
true));
const TiledHloInstruction* dot = tiled_hlo_computation.GetRoot();
EXPECT_THAT(*dot, MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0, d1) -> (d0 * 2, d1 * 2),
domain:
d0 in [0, 1],
d1 in [0, 7]
)"));
const TiledHloInstruction* lhs = dot->operand(0);
EXPECT_THAT(*lhs, MatchTiledHloInstruction(
{2, 8}, {1, 1},
R"(
(d0, d1) -> (d0 * 2, 0),
domain:
d0 in [0, 1],
d1 in [0, 7]
)"));
const TiledHloInstruction* rhs = dot->operand(1);
EXPECT_THAT(*rhs, MatchTiledHloInstruction(
{8, 2}, {1, 1},
R"(
(d0, d1) -> (0, d1 * 2),
domain:
d0 in [0, 1],
d1 in [0, 7]
)"));
}
TEST_F(SymbolicTileAnalysisTest, DoesNotBailOutOnConstrainedReshape) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[4,2]{1,0} parameter(0)
ROOT reshape = f32[8] reshape(p0)
}
ENTRY main {
p0 = f32[4,2]{1,0} parameter(0)
ROOT fusion = f32[8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(2));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(1));
}
TEST_F(SymbolicTileAnalysisTest, DoesNotBailOutOnConstrainedBitcast) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[4,2]{1,0} parameter(0)
ROOT bitcast = f32[8] bitcast(p0)
}
ENTRY main {
p0 = f32[4,2]{1,0} parameter(0)
ROOT fusion = f32[8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(2));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(1));
}
TEST_F(SymbolicTileAnalysisTest, BailOutOnUnsupportedConcatenate) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,3]{1,0} parameter(0)
p1 = f32[1,3]{1,0} parameter(1)
ROOT concatenate = f32[2,3] concatenate(p0, p1), dimensions={0}
}
ENTRY main {
p0 = f32[1,3]{1,0} parameter(0)
p1 = f32[1,3]{1,0} parameter(1)
ROOT fusion = f32[2,3] fusion(p0, p1), kind=kLoop, calls=fusion
})"));
EXPECT_FALSE(TryAnalyzeModule(module.get()).has_value());
}
TEST_F(SymbolicTileAnalysisTest, MultiOutputFusionIsNotSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
add = f32[32] add(p0, p1)
subtract = f32[32] subtract(p0, p1)
ROOT tuple = (f32[32], f32[32]) tuple(add, subtract)
}
ENTRY main {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
ROOT fusion = (f32[32], f32[32]) fusion(p0, p1), kind=kLoop, calls=fusion
})"));
EXPECT_FALSE(TryAnalyzeModule(module.get()).has_value());
}
TEST_F(SymbolicTileAnalysisTest, ConstraintSatisfactionIsEvaluatedCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT bitcast = f32[48,32]{1,0} bitcast(p0)
}
ENTRY main {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT fusion = f32[48,32]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(4));
for (const ConstraintExpression::ConjointConstraints& conjunction :
constraints.DisjointConjointConstraints())
EXPECT_THAT(conjunction, SizeIs(2));
std::vector<int64_t> possible_tile_parameters({6, 8});
EXPECT_THAT(analysis->ParametersSatisfyConstraints(possible_tile_parameters),
IsOkAndHolds(true));
std::vector<int64_t> impossible_tile_parameters({6, 7});
EXPECT_THAT(
analysis->ParametersSatisfyConstraints(impossible_tile_parameters),
IsOkAndHolds(false));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({6}),
StatusIs(absl::StatusCode::kInvalidArgument));
TF_EXPECT_OK(
analysis->ParametersSatisfyConstraints(possible_tile_parameters));
EXPECT_THAT(analysis->ComputeTiledHloInstructions(impossible_tile_parameters),
StatusIs(absl::StatusCode::kInvalidArgument));
TF_EXPECT_OK(analysis->ComputeTiledHloInstructions(
impossible_tile_parameters, true));
}
TEST_F(SymbolicTileAnalysisTest, EmitterSpecificConstraintsAreUsedCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[16,32] parameter(0)
ROOT add = f32[16,32] add(p0, p0)
}
ENTRY main {
p0 = f32[16,32] parameter(0)
ROOT fusion = f32[16,32] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(
module.get(), FakeEmitterSpecificConstraints::GetBuilder());
ASSERT_TRUE(analysis.has_value());
EXPECT_THAT(analysis->ParametersSatisfyConstraints({5, 32}),
IsOkAndHolds(false));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({8, 32}),
IsOkAndHolds(true));
}
TEST_F(SymbolicTileAnalysisTest, ConstraintsAreAggregatedCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,48,4,8]{3,2,1,0} parameter(0)
p1 = f32[1,8,6,32]{3,2,1,0} parameter(1)
bitcast_p0 = f32[48,32]{1,0} bitcast(p0)
bitcast_p1 = f32[48,32]{1,0} bitcast(p1)
ROOT add = f32[48,32]{1,0} add(bitcast_p0, bitcast_p1)
}
ENTRY main {
p0 = f32[1,48,4,8]{3,2,1,0} parameter(0)
p1 = f32[1,8,6,32]{3,2,1,0} parameter(1)
ROOT fusion = f32[48,32]{1,0} fusion(p0, p1), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(4));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(2));
}
bool AlwaysValid(absl::Span<const int64_t>) { return true; }
TEST(GetGoodTilingsTest, ReturnsOneTilingWhenRankIsZero) {
EXPECT_EQ(GetGoodTilings({}, AlwaysValid),
TilingVector{SymbolicTileAnalysis::Tiling{}});
}
TEST(GetGoodTilingsTest, ReturnsPowersOfTwoAndTheDimSizeForRankOne) {
EXPECT_EQ(GetGoodTilings({1}, AlwaysValid), TilingVector{{1}});
EXPECT_EQ(GetGoodTilings({2}, AlwaysValid), TilingVector({{1}, {2}}));
EXPECT_EQ(GetGoodTilings({3}, AlwaysValid), TilingVector({{1}, {2}, {3}}));
EXPECT_EQ(GetGoodTilings({4}, AlwaysValid), TilingVector({{1}, {2}, {4}}));
EXPECT_EQ(GetGoodTilings({5}, AlwaysValid),
TilingVector({{1}, {2}, {4}, {5}}));
EXPECT_EQ(GetGoodTilings({11}, AlwaysValid),
TilingVector({{1}, {2}, {4}, {8}, {11}}));
}
TEST(GetGoodTilingsTest, CreatesCartesianProductForRankTwo) {
EXPECT_EQ(GetGoodTilings({3, 4}, AlwaysValid), TilingVector({{1, 1},
{1, 2},
{1, 4},
{2, 1},
{2, 2},
{2, 4},
{3, 1},
{3, 2},
{3, 4}}));
}
TEST(GetGoodTilingsTest, CreatesCartesianProductForRankThree) {
EXPECT_EQ(GetGoodTilings({3, 4, 2}, AlwaysValid), TilingVector({{1, 1, 1},
{1, 1, 2},
{1, 2, 1},
{1, 2, 2},
{1, 4, 1},
{1, 4, 2},
{2, 1, 1},
{2, 1, 2},
{2, 2, 1},
{2, 2, 2},
{2, 4, 1},
{2, 4, 2},
{3, 1, 1},
{3, 1, 2},
{3, 2, 1},
{3, 2, 2},
{3, 4, 1},
{3, 4, 2}}));
}
TEST(GetGoodTilingsTest, FiltersTheTilingsUsingThePredicate) {
auto all_even = [](absl::Span<const int64_t> tile_sizes) {
return absl::c_all_of(tile_sizes,
[](int64_t tile_size) { return tile_size % 2 == 0; });
};
EXPECT_EQ(GetGoodTilings({3, 4}, all_even), TilingVector({{2, 2}, {2, 4}}));
auto all_equal = [](absl::Span<const int64_t> tile_sizes) {
return absl::c_all_of(tile_sizes, [&](int64_t tile_size) {
return tile_size == tile_sizes.at(0);
});
};
EXPECT_EQ(GetGoodTilings({3, 3, 3}, all_equal),
TilingVector({{1, 1, 1}, {2, 2, 2}, {3, 3, 3}}));
}
TEST_F(SymbolicTileAnalysisTest,
GetGoodTilingsWorksTakingConstraintsIntoAccount) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,8,6,1]{3,2,1,0} parameter(0)
ROOT bitcast = f32[48,1]{1,0} bitcast(p0)
}
ENTRY main {
p0 = f32[1,8,6,1]{3,2,1,0} parameter(0)
ROOT fusion = f32[48,1]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_EQ(good_tilings, std::vector<SymbolicTileAnalysis::Tiling>(
{{1, 1}, {2, 1}, {48, 1}}));
}
void LogTilingsIfVlog1(absl::Span<const SymbolicTileAnalysis::Tiling> tilings) {
if (VLOG_IS_ON(1)) {
LOG(INFO) << "Tilings: {";
for (const SymbolicTileAnalysis::Tiling& tiling : tilings) {
LOG(INFO) << "{" << absl::StrJoin(tiling, ",") << "},";
}
LOG(INFO) << "}";
}
}
TEST_F(SymbolicTileAnalysisTest, GetGoodTilingsWorksForSoftmaxExample) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
add_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add = f32[] add(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
bitcast = f32[4,2048,50304] bitcast(param_0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
bitcast.1 = f32[4,2048] bitcast(reduce)
broadcast = f32[4,2048,50304] broadcast(bitcast.1), dimensions={0,1}
subtract = f32[4,2048,50304] subtract(bitcast, broadcast)
exponential = f32[4,2048,50304] exponential(subtract)
constant.1 = f32[] constant(0)
reduce.1 = f32[4,2048] reduce(exponential, constant.1), dimensions={2}, to_apply=add_computation
log = f32[4,2048] log(reduce.1)
broadcast.1 = f32[4,2048,50304] broadcast(log), dimensions={0,1}
ROOT subtract.1 = f32[4,2048,50304] subtract(subtract, broadcast.1)
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
ROOT fusion = f32[4,2048,50304] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_THAT(good_tilings, Not(IsEmpty()));
LogTilingsIfVlog1(good_tilings);
}
TEST_F(SymbolicTileAnalysisTest,
GetGoodTilingsWorksForSoftmaxAndReduceExample) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
add_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add = f32[] add(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
param_1 = s32[4,2048] parameter(1)
broadcast = s32[4,2048,50304] broadcast(param_1), dimensions={0,1}
iota = s32[4,2048,50304] iota(), iota_dimension=2
compare = pred[4,2048,50304] compare(broadcast, iota), direction=EQ
bitcast = f32[4,2048,50304] bitcast(param_0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
bitcast.1 = f32[4,2048] bitcast(reduce)
broadcast.1 = f32[4,2048,50304] broadcast(bitcast.1), dimensions={0,1}
subtract = f32[4,2048,50304] subtract(bitcast, broadcast.1)
exponential = f32[4,2048,50304] exponential(subtract)
constant.1 = f32[] constant(0)
reduce.1 = f32[4,2048] reduce(exponential, constant.1), dimensions={2}, to_apply=add_computation
log = f32[4,2048] log(reduce.1)
broadcast.2 = f32[4,2048,50304] broadcast(log), dimensions={0,1}
subtract.1 = f32[4,2048,50304] subtract(subtract, broadcast.2)
constant.2 = f32[] constant(0)
broadcast.3 = f32[4,2048,50304] broadcast(constant.2), dimensions={}
select = f32[4,2048,50304] select(compare, subtract.1, broadcast.3)
bitcast.2 = f32[4,2048,393,128] bitcast(select)
ROOT reduce.2 = f32[4,2048,393] reduce(bitcast.2, constant.2), dimensions={3}, to_apply=add_computation
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
param_1 = s32[4,2048] parameter(1)
ROOT fusion = f32[4,2048,393] fusion(param_0, param_1), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton_softmax"}}
}
)"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_THAT(good_tilings, Not(IsEmpty()));
LogTilingsIfVlog1(good_tilings);
}
TEST_F(SymbolicTileAnalysisTest,
FusionWithNumberOfTilesLargerThanInt32MaxIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule softmax
fused_computation {
param_0 = f16[65538,32768]{1,0} parameter(0)
ROOT log = f16[65538,32768]{1,0} log(param_0)
}
ENTRY main {
param_0 = f16[65538,32768]{1,0} parameter(0)
ROOT fusion = f16[65538,32768]{1,0} fusion(param_0), kind=kLoop, calls=fused_computation
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{1, 1},
false,
true));
EXPECT_THAT(*tiled_hlo_computation.GetRoot(),
MatchTiledHloInstruction(
{1, 1},
{1, 1},
R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 65537],
d1 in [0, 32767]
)"));
}
TEST_F(SymbolicTileAnalysisTest, CanComputeTiledHloInstructionsWithRTVars) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = s32[] parameter(0)
param_1 = s32[] parameter(1)
ROOT maximum = s32[] maximum(param_0, param_1)
}
fused_computation {
src = s32[2,2,258] parameter(0)
of1 = s32[] parameter(1)
of2 = s32[] parameter(2)
of3 = s32[] parameter(3)
ds = s32[1,2,32] dynamic-slice(s32[2,2,258] src, s32[] of1, s32[] of2, s32[] of3),
dynamic_slice_sizes={1, 2, 32}
c0 = s32[] constant(0)
ROOT reduce = s32[1,2] reduce(ds, c0), dimensions={2}, to_apply=max_computation
}
ENTRY main {
param_0 = s32[2,2,258] parameter(0)
param_1 = s32[] parameter(1)
param_2 = s32[] parameter(2)
param_3 = s32[] parameter(3)
ROOT fusion = s32[1,2] fusion(param_0, param_1, param_2, param_3), kind=kLoop, calls=fused_computation
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{1, 1}, false,
true));
const TiledHloInstruction* dynamic_slice =
tiled_hlo_computation.GetRoot()->operand(0);
const TiledHloInstruction* param_0_tile = dynamic_slice->operand(0);
EXPECT_THAT(*dynamic_slice, MatchTiledHloInstruction(
{1, 1, 32},
{0, 1, 1},
R"(
(d0, d1) -> (0, d1, 0),
domain:
d0 in [0, 0],
d1 in [0, 1]
)"));
EXPECT_THAT(*param_0_tile, MatchTiledHloInstruction(
{1, 1, 32},
{0, 1, 1},
R"(
(d0, d1)[rt0, rt1] -> (rt0, d1, rt1),
domain:
d0 in [0, 0],
d1 in [0, 1],
rt0 in [0, 1],
rt1 in [0, 226]
)"));
}
TEST_F(SymbolicTileAnalysisTest,
BailsOutOnReshapeWhenStandaloneSymbolicTileDerivationFails) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
add_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
p0 = f32[2,128,128] parameter(0)
bitcast_fix = f32[16384,1,2] bitcast(p0)
bitcast = f32[2,128,128] bitcast(bitcast_fix)
c0 = f32[] constant(0)
reduce = f32[2,128] reduce(bitcast, c0), dimensions={2},
to_apply=add_computation
}
ENTRY main {
p0 = f32[2,128,128] parameter(0)
ROOT fusion = f32[2,128] fusion(p0), kind=kLoop, calls=fused_computation
})"));
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*module->entry_computation()
->root_instruction()
->fused_instructions_computation(),
&mlir_context_, nullptr);
EXPECT_TRUE(std::holds_alternative<FusionDecision>(analysis_or_error));
EXPECT_THAT(std::get<FusionDecision>(analysis_or_error).Explain(),
::testing::HasSubstr("Bailing out on reshape"));
}
TEST_F(SymbolicTileAnalysisTest,
DoesNotBailOutOnFilteredOutHloIfThatHloIsOnlyAnOperand) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
p0 = f32[10,10] parameter(0)
ROOT reshape = f32[100] reshape(p0)
}
ENTRY main {
p0 = f32[10,2] parameter(0)
p1 = f32[2,10] parameter(1)
untileable_dot = f32[10,10] dot(p0, p1),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT fusion = f32[100] fusion(untileable_dot),
kind=kLoop, calls=fused_computation
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
EXPECT_TRUE(analysis.has_value());
}
TEST_F(SymbolicTileAnalysisTest, IotaAlwaysHasTileOffsetsIndexingSet) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
ROOT iota = s32[100] iota(), iota_dimension=0
}
ENTRY main {
ROOT fusion = s32[100] fusion(), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions(
{4},
false,
false));
const TiledHloInstruction* iota = tiled_hlo_computation.GetRoot();
EXPECT_THAT(iota->tile_offsets_indexing().status(), ::tsl::testing::IsOk());
}
}  // namespace
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tile_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tile_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
82cb7774-5af3-4c61-8b54-eb605a8ae3f4 | cpp | tensorflow/tensorflow | triton_emitter_constraints | third_party/xla/xla/service/gpu/model/triton_emitter_constraints.cc | third_party/xla/xla/service/gpu/model/triton_emitter_constraints_test.cc | #include "xla/service/gpu/model/triton_emitter_constraints.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineMap.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
constexpr int64_t kMaxTensorNumElements = 1048576;
llvm::SmallVector<int64_t> GetPaddedTileSizes(
llvm::SmallVector<int64_t> tile_sizes) {
llvm::SmallVector<int64_t> result;
result.reserve(tile_sizes.size());
for (int64_t value : tile_sizes) {
result.push_back(llvm::PowerOf2Ceil(value));
}
return result;
}
}  // namespace
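// Collects extra constraints for reshapes and bitcasts inside the fusion: the
// tile sizes propagated to such an instruction (through its size map) must
// satisfy the constraints of the symbolic tile derived from its own
// output-to-input indexing.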
std::vector<TritonEmitterConstraints::CustomConstraints>
TritonEmitterConstraints::DeriveCustomConstraints(
const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
instructions,
const HloFusionAdaptor& fusion_adaptor) {
std::vector<CustomConstraints> result;
for (const auto& instruction : instructions) {
const HloInstruction* hlo = instruction->hlo();
if (hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast) {
if (!fusion_adaptor.ContainsInstruction(hlo)) {
continue;
}
mlir::MLIRContext* ctx =
instruction->symbolic_tile().size_map().getContext();
IndexingMap reshape_indexing_map =
*ComputeOutputToInputIndexing(hlo, 0, ctx)
.indexing_maps[0]
.begin();
std::optional<SymbolicTile> reshape_symbolic_tile =
SymbolicTile::FromIndexingMap(reshape_indexing_map);
CHECK(reshape_symbolic_tile.has_value());
ConstraintExpression reshape_constraints =
reshape_symbolic_tile->constraints();
result.push_back(
CustomConstraints{instruction->symbolic_tile().size_map(),
std::move(reshape_constraints)});
}
}
return result;
}
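// Returns a builder that creates TritonEmitterConstraints for a fusion from
// its symbolic tiled instructions and the target device description.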
EmitterSpecificConstraintsBuilder
TritonEmitterConstraints::GetBuilder(
const se::DeviceDescription& device_description) {
return [=](const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
instructions,
const HloFusionAdaptor& fusion_adaptor) {
llvm::DenseSet<mlir::AffineMap> unique_tile_size_maps;
for (const auto& tiled_hlo_instruction : instructions) {
unique_tile_size_maps.insert(
tiled_hlo_instruction->symbolic_tile().size_map());
}
std::vector<CustomConstraints> custom_constraints =
DeriveCustomConstraints(instructions, fusion_adaptor);
llvm::SmallVector<mlir::AffineMap, 4> tile_size_maps(
unique_tile_size_maps.begin(), unique_tile_size_maps.end());
return std::unique_ptr<TritonEmitterConstraints>(
absl::WrapUnique(new TritonEmitterConstraints(
std::move(tile_size_maps), std::move(custom_constraints),
instructions.back()->hlo()->shape(),
device_description)));
};
}
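// Rejects tilings whose padded tiles exceed the per-tile element limit, whose
// total tile count reaches the device's block dimension limit, or which
// violate the custom reshape/bitcast constraints.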
absl::StatusOr<bool> TritonEmitterConstraints::ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const {
for (const auto& tile_size_map : tile_size_maps_) {
int64_t tile_size = 1;
for (auto expr : tile_size_map.getResults()) {
tile_size *= llvm::PowerOf2Ceil(
EvaluateAffineExpr(expr, tile_parameters));
}
if (tile_size > kMaxTensorNumElements) {
return false;
}
}
int64_t num_tiles = 1;
for (auto [dim_size, tile_size] :
llvm::zip(root_shape_.dimensions(), tile_parameters)) {
num_tiles *= (dim_size + tile_size - 1) / tile_size;
}
if (num_tiles >= device_info_.block_dim_limit().x) {
return false;
}
for (const auto& custom_constraint : custom_constraints_) {
llvm::SmallVector<int64_t> transformed_tile_parameters =
EvaluateAffineMap(custom_constraint.tile_parameters_transform,
tile_parameters);
if (!custom_constraint.constraints.IsSatisfiedBy(
GetPaddedTileSizes(transformed_tile_parameters))) {
return false;
}
}
return true;
}
}  // namespace gpu
} | #include "xla/service/gpu/model/triton_emitter_constraints.h"
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class TritonEmitterConstraintsTest : public HloTestBase {
public:
std::optional<SymbolicTileAnalysis> TryAnalyzeModule(
HloModule* module, bool with_triton_emitter_specific_constraints = true) {
EmitterSpecificConstraintsBuilder constraints_builder = nullptr;
if (with_triton_emitter_specific_constraints) {
constraints_builder =
TritonEmitterConstraints::GetBuilder(device_description_);
}
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*module->entry_computation()
->root_instruction()
->fused_instructions_computation(),
&mlir_context_, constraints_builder);
if (std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)) {
return std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
}
VLOG(1) << "Cannot analyze module: "
<< std::get<FusionDecision>(analysis_or_error).Explain();
return std::nullopt;
}
mlir::MLIRContext mlir_context_;
se::DeviceDescription device_description_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
};
TEST_F(TritonEmitterConstraintsTest, TooBigTileSizesConstraintIsEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
broadcast = f32[8192,50304] broadcast(reduce), dimensions={0}
ROOT subtract = f32[8192,50304] subtract(param_0, broadcast)
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
ROOT fusion = f32[8192,50304] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
EXPECT_THAT(analysis->ParametersSatisfyConstraints({8, 128}),
IsOkAndHolds(true));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({18, 50304}),
IsOkAndHolds(false));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({1024, 1}),
IsOkAndHolds(false));
}
TEST_F(TritonEmitterConstraintsTest, TooManyBlocksConstraintIsEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
fused_computation {
param_0 = f32[65536,65536] parameter(0)
ROOT log = f32[65536,65536] log(param_0)
}
ENTRY entry_computation {
param_0 = f32[65536,65536] parameter(0)
ROOT fusion = f32[65536,65536] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
EXPECT_THAT(analysis->ParametersSatisfyConstraints({128, 128}),
IsOkAndHolds(true));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({1, 1}),
IsOkAndHolds(false));
}
TEST_F(TritonEmitterConstraintsTest, CustomReshapeConstraintsAreEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_computation {
p = s8[36] parameter(0)
ROOT bitcast = s8[6,6] bitcast(p)
}
ENTRY entry_computation {
p = s8[36] parameter(0)
ROOT fusion = s8[6,6] fusion(p), kind=kCustom, calls=triton_computation
})"));
std::optional<SymbolicTileAnalysis> analysis_without_triton_constraints =
TryAnalyzeModule(module.get(),
false);
ASSERT_TRUE(analysis_without_triton_constraints.has_value());
EXPECT_THAT(
analysis_without_triton_constraints->ParametersSatisfyConstraints({2, 6}),
IsOkAndHolds(true));
std::optional<SymbolicTileAnalysis> analysis_with_triton_constraints =
TryAnalyzeModule(module.get(),
true);
ASSERT_TRUE(analysis_with_triton_constraints.has_value());
EXPECT_THAT(
analysis_with_triton_constraints->ParametersSatisfyConstraints({2, 6}),
IsOkAndHolds(false));
EXPECT_THAT(
analysis_with_triton_constraints->ParametersSatisfyConstraints({1, 6}),
IsOkAndHolds(true));
}
TEST_F(TritonEmitterConstraintsTest,
ReshapeConstraintsAreNotDerivedForFusionOperands) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_computation {
p = s8[6,6] parameter(0)
ROOT add = s8[6,6] add(p, p)
}
ENTRY entry_computation {
p = s8[36] parameter(0)
bitcast = s8[6,6] bitcast(p)
ROOT fusion = s8[6,6] fusion(bitcast),
kind=kCustom, calls=triton_computation
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const HloComputation* triton_computation =
FindComputation(module.get(), "triton_computation");
std::unique_ptr<EmitterSpecificConstraints> constraints =
TritonEmitterConstraints::GetBuilder(device_description_)(
analysis->GetSymbolicTiledHloComputation(),
*HloFusionAdaptor::ForComputation(triton_computation));
EXPECT_FALSE(reinterpret_cast<TritonEmitterConstraints*>(constraints.get())
->HasCustomConstraints());
}
}  // namespace
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/triton_emitter_constraints.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/triton_emitter_constraints_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7eeb4c59-1ba3-41fa-9a55-c2db5b2366a6 | cpp | tensorflow/tensorflow | gpu_hlo_cost_analysis | third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis.cc | third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis_test.cc | #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
static constexpr absl::string_view kIRSizeKey = HloCostAnalysis::kReserved0Key;
static constexpr absl::string_view kBasicBlockSplitCountKey =
HloCostAnalysis::kReserved1Key;
static constexpr absl::string_view kCollAlgoScaleRatioKey =
"Collective algorithm's scaling ratio";
static constexpr absl::string_view kCollNumDevicesKey =
"Number of devices of a collective group";
absl::Status GpuHloCostAnalysis::Preprocess(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::Preprocess(hlo));
current_properties_[kIRSizeKey] = 1;
current_properties_[kBasicBlockSplitCountKey] =
ElementalIrEmitter::OpInvalidatesCache(hlo);
return absl::OkStatus();
}
float GpuHloCostAnalysis::ScalingRatio(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollAlgoScaleRatioKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::NumOfDevices(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollNumDevicesKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::FusionParameterReadBytes(
const HloInstruction* hlo) const {
CHECK(hlo->IsFused() && (hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kGetTupleElement));
float utilization = hlo_properties_.at(hlo)[kUtilizationKey];
if (!options_.count_multiple_input_accesses) {
utilization = fmin(utilization, 1.0);
}
return std::llround(GetShapeSize(hlo->shape()) * utilization);
}
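// Walks the fused computation from the root towards the parameters and
// accumulates, for every fused instruction, its utilization and the number of
// times it will be emitted, based on the elementwise-use roots that reach it.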
absl::Status GpuHloCostAnalysis::FusionCalculateUtilizations(
const HloInstruction* fusion) {
const HloInstruction* root = fusion->fused_expression_root();
std::vector<HloInstruction*> instructions =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
absl::c_reverse(instructions);
absl::flat_hash_map<const HloInstruction*, int64_t> root_ir_sizes;
for (const HloInstruction* instr : instructions) {
hlo_properties_[instr][kUtilizationKey] = 0;
hlo_properties_[instr][kIRSizeKey] = 0;
elementwise_use_roots_[instr].clear();
root_utilizations_[instr] = 0;
}
root_utilizations_[root] = 1.0;
root_ir_sizes[root] = 1;
elementwise_use_roots_[root].insert(root);
current_properties_[kFlopsKey] = 0;
current_properties_[kBasicBlockSplitCountKey] = 0;
current_properties_[kIRSizeKey] = 0;
for (const HloInstruction* instr : instructions) {
VLOG(8) << instr->name() << ":";
VLOG(9) << "Elementwise use roots:";
Properties& instr_props = hlo_properties_[instr];
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
VLOG(9) << "\t" << r->name() << ": " << root_utilizations_[r];
instr_props[kUtilizationKey] += root_utilizations_[r];
instr_props[kIRSizeKey] += root_ir_sizes[r];
}
float cur_instr_utilization = instr_props[kUtilizationKey];
VLOG(8) << "Total utilization: " << cur_instr_utilization;
float cur_instr_times_emitted = instr_props[kIRSizeKey];
VLOG(8) << "Times emitted: " << cur_instr_times_emitted;
current_properties_[kFlopsKey] +=
cur_instr_utilization * instr_props[kFlopsKey];
current_properties_[kIRSizeKey] += cur_instr_times_emitted;
current_properties_[kBasicBlockSplitCountKey] +=
cur_instr_times_emitted * ElementalIrEmitter::OpInvalidatesCache(instr);
for (int operand_idx = 0; operand_idx < instr->operand_count();
++operand_idx) {
const HloInstruction* operand = instr->operand(operand_idx);
if ((instr->IsElementwise()) || instr->opcode() == HloOpcode::kTuple ||
instr->opcode() == HloOpcode::kGetTupleElement) {
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
elementwise_use_roots_[operand].insert(r);
}
} else {
elementwise_use_roots_[operand].insert(operand);
float cur_operand_utilization =
cur_instr_utilization * operand_utilization(*instr, operand_idx);
int64_t operand_elements =
ShapeUtil::ElementsInRecursive(operand->shape());
if (operand_elements == 0) {
cur_operand_utilization = 0;
} else {
cur_operand_utilization =
ceil(cur_operand_utilization * operand_elements) /
operand_elements;
}
root_utilizations_[operand] += cur_operand_utilization;
root_ir_sizes[operand] += cur_instr_times_emitted;
}
}
}
return absl::OkStatus();
}
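// Returns the summed utilization of the elementwise-use roots that `a` and
// `b` have in common.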
float GpuHloCostAnalysis::CommonElementwiseUtilization(
const HloInstruction* a, const HloInstruction* b) const {
float ret = 0;
for (auto r : elementwise_use_roots_.at(a)) {
if (elementwise_use_roots_.at(b).count(r)) {
ret += root_utilizations_.at(r);
}
}
return ret;
}
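// Estimates whether fusing `producer` into `consumer` would produce IR that
// exceeds the size or basic-block-split budgets of the emitter.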
bool GpuHloCostAnalysis::ProducerConsumerMergedTooLarge(
const HloInstruction& producer, const HloInstruction& consumer) {
int64_t producer_replication = 1;
if (consumer.opcode() == HloOpcode::kFusion) {
producer_replication =
IrSize(*consumer.fused_parameter(consumer.operand_index(&producer)));
}
VLOG(5) << producer.name() << " would be emitted by " << consumer.name()
<< " x" << producer_replication;
int64_t n_splits = producer_replication * IrBasicBlockSplitCount(producer) +
IrBasicBlockSplitCount(consumer);
VLOG(5) << "Basic block split counts: " << IrBasicBlockSplitCount(producer)
<< ", " << IrBasicBlockSplitCount(consumer) << " -> " << n_splits;
int64_t merged_ir_size =
(IrSize(producer) * producer_replication + IrSize(consumer));
if (producer.GetModule()
->config()
.debug_options()
.xla_gpu_mlir_emitter_level() < 4) {
if (n_splits > kMaxBasicBlockSplitsPerFusion) {
return true;
}
merged_ir_size *= (1 << n_splits);
}
VLOG(5) << "IR sizes: " << IrSize(producer) << ", " << IrSize(consumer)
<< " -> " << merged_ir_size;
return merged_ir_size > kMaxIRSize;
}
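// Custom calls to cuBLAS GEMMs and cuDNN convolutions get dedicated flop
// models; other custom calls fall back to the generic handling.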
absl::Status GpuHloCostAnalysis::HandleCustomCall(
const HloInstruction* custom_call) {
if (IsCublasGemm(*custom_call)) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
custom_call->backend_config<gpu::GpuBackendConfig>());
const gpu::GemmBackendConfig& gemm_config =
gpu_config.gemm_backend_config();
const Shape& output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
current_properties_[kFlopsKey] =
GetDotFlops(custom_call->operand(0)->shape(), output_shape,
gemm_config.dot_dimension_numbers());
return absl::OkStatus();
}
if (IsCustomCallToDnnConvolution(*custom_call)) {
current_properties_[kFlopsKey] = GetConvolutionFlops(custom_call);
if (custom_call->shape().IsTuple()) {
float output_size =
options_.shape_size(custom_call->shape().tuple_shapes(0));
current_properties_[kBytesAccessedKey] -=
current_properties_.output_bytes_accessed();
current_properties_[kBytesAccessedKey] += output_size;
current_properties_.set_output_bytes_accessed(output_size);
}
return absl::OkStatus();
}
return HloCostAnalysis::HandleCustomCall(custom_call);
}
int64_t GpuHloCostAnalysis::GetConvolutionFlops(
const HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& result_shape = [&]() -> const Shape& {
const Shape& shape = convolution->shape();
if (IsCustomCallToDnnConvolution(*convolution) &&
convolution->shape().IsTuple()) {
return shape.tuple_shapes(0);
}
return shape;
}();
return HloCostAnalysis::GetConvolutionFlops(convolution, lhs_shape, rhs_shape,
result_shape);
}
int64_t GpuHloCostAnalysis::GetFlopsPerElementwiseOpElement(
const PrimitiveType type, const HloOpcode opcode) {
constexpr int64_t kDefaultFlopsPerElement = 3;
return FindOrDefault(hlo_elementwise_op_profile_,
std::make_pair(opcode, type), kDefaultFlopsPerElement);
}
int64_t GpuHloCostAnalysis::GetFlopsForElementwiseOp(const HloOpcode op_code,
const Shape& shape) {
int64_t flop_per_element =
GetFlopsPerElementwiseOpElement(shape.element_type(), op_code);
return flop_per_element * ShapeUtil::ElementsInRecursive(shape);
}
int64_t GpuHloCostAnalysis::GetFlopsForElementwiseOp(
const HloInstruction* instr) {
return GetFlopsForElementwiseOp(instr->opcode(), instr->shape());
}
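// Models an all-reduce: bytes accessed for all operands and outputs, flops of
// the reduction computation applied to the output shape, and an algorithmic
// scaling ratio derived from the number of participating ranks.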
absl::Status GpuHloCostAnalysis::HandleAllReduce(
const HloInstruction* allreduce) {
const HloModuleConfig& config = allreduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(
allreduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(allreduce)->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices, allreduce->replica_groups(), group_mode));
int64_t num_ranks = 1;
for (auto count : participant_counts) {
num_ranks = std::max(num_ranks, count);
}
VLOG(5) << "Computing cost for " << num_ranks << " ranks in "
<< allreduce->ToString();
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachSubshape(
allreduce->shape(), [&](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsArray()) {
output_bytes_accessed += GetShapeSize(subshape);
}
});
int64_t bytes_accessed = output_bytes_accessed;
for (const HloInstruction* operand : allreduce->operands()) {
bytes_accessed += GetShapeSize(operand->shape());
}
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
current_properties_[kBytesAccessedKey] = bytes_accessed;
current_properties_[kCollNumDevicesKey] = num_ranks;
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(
allreduce->to_apply()->root_instruction()->opcode(), allreduce->shape());
int num_intra_steps = 2 * (num_ranks - 1);
float scaling_ratio = (1.0 * num_ranks) / num_intra_steps;
current_properties_[kCollAlgoScaleRatioKey] = scaling_ratio;
return absl::OkStatus();
}
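// Concatenate is modeled as cheap per element unless it concatenates along a
// dimension other than 0 whose size in the first operand is not a multiple of
// 32, in which case a much higher per-element cost is used.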
absl::Status GpuHloCostAnalysis::HandleConcatenate(const HloInstruction* hlo) {
int64_t flop_per_element = 6;
int64_t dim = Cast<HloConcatenateInstruction>(hlo)->concatenate_dimension();
if (dim > 0 && hlo->operand(0)->shape().dimensions()[dim] & 31) {
flop_per_element = 400;
}
current_properties_[kFlopsKey] =
flop_per_element * ShapeUtil::ElementsInRecursive(hlo->shape());
return absl::OkStatus();
}
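// Refines the base reduce cost: output bytes are summed over all leaf shapes
// of the (possibly variadic) result, and every init-value operand is counted
// as being read once per output element.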
absl::Status GpuHloCostAnalysis::HandleReduce(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::HandleReduce(hlo));
const HloReduceInstruction* reduce = DynCast<HloReduceInstruction>(hlo);
auto output_shape = reduce->shape().IsArray()
? reduce->shape()
: reduce->shape().tuple_shapes(0);
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachLeafShape(
reduce->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
output_bytes_accessed += GetShapeSize(sub_shape);
});
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
int64_t bytes_accessed = output_bytes_accessed;
for (int64_t input_operand_id = 0; input_operand_id < reduce->input_count();
++input_operand_id) {
bytes_accessed +=
current_properties_.operand_bytes_accessed(input_operand_id);
}
int64_t output_shape_size = ShapeUtil::ElementsIn(output_shape);
for (int64_t init_operand_id = reduce->input_count();
init_operand_id < reduce->operand_count(); ++init_operand_id) {
auto init_operand = reduce->operand(init_operand_id);
int64_t operand_bytes_accessed =
output_shape_size * GetShapeSize(init_operand->shape());
current_properties_.set_operand_bytes_accessed(init_operand_id,
operand_bytes_accessed);
current_properties_.set_operand_utilization(init_operand_id,
output_shape_size);
bytes_accessed += operand_bytes_accessed;
}
current_properties_[kBytesAccessedKey] = bytes_accessed;
return absl::OkStatus();
}
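// Elementwise ops take their FLOP count from the per-element profile lookup
// applied to the instruction's shape.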
absl::Status GpuHloCostAnalysis::HandleElementwiseOp(
const HloInstruction* hlo) {
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(hlo);
return absl::OkStatus();
}
std::unique_ptr<HloCostAnalysis>
GpuHloCostAnalysis::CreateNestedCostAnalysis() {
return std::make_unique<GpuHloCostAnalysis>(options_,
hlo_elementwise_op_profile_);
}
bool GpuHloCostAnalysis::KeyToCopyFromSubcomputation(
absl::string_view key) const {
return !absl::StartsWith(key, kBytesAccessedKey) &&
!absl::StartsWith(key, kUtilizationKey) &&
!absl::StartsWith(key, kIRSizeKey) &&
!absl::StartsWith(key, kBasicBlockSplitCountKey);
}
float GpuHloCostAnalysis::IrBasicBlockSplitCount(
const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kBasicBlockSplitCountKey, hlo_properties_);
}
float GpuHloCostAnalysis::IrSize(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kIRSizeKey, hlo_properties_);
}
}  // namespace gpu
} | #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
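// Fixture that runs GpuHloCostAnalysis over parsed HLO modules, sizing shapes
// with an 8-byte pointer size.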
class GpuHloCostAnalysisTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
HloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
{},
true};
GpuHloCostAnalysis analysis_{options_};
GpuHloCostAnalysisTest() : HloTestBase() {}
};
TEST_F(GpuHloCostAnalysisTest, ConvCustomCall) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = s8[128,12,24,24,4]{4,3,2,1,0} parameter(0)
p1 = s8[16,12,5,5,4]{4,3,2,1,0} parameter(1)
p2 = f32[16]{0} parameter(2)
conv1 = (s8[128,4,24,24,4]{4,3,2,1,0}, u8[0]{0}) custom-call(p0, p1, p2),
window={size=5x5 pad=2_2x2_2},
dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
ROOT tuple = tuple(conv1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloComputation* comp = module->entry_computation();
const HloInstruction* conv1 = comp->GetInstructionWithName("conv1");
int op0_size = sizeof(int8_t) * 128 * 12 * 24 * 24 * 4;
int op1_size = sizeof(int8_t) * 16 * 12 * 5 * 5 * 4;
int op2_size = sizeof(float) * 16;
int out_size = sizeof(int8_t) * 128 * 4 * 24 * 24 * 4;
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 0), op0_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 1), op1_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 2), op2_size);
EXPECT_EQ(analysis_.output_bytes_accessed(*conv1), out_size);
EXPECT_EQ(analysis_.bytes_accessed(*conv1),
op0_size + op1_size + op2_size + out_size);
EXPECT_EQ(analysis_.flop_count(*conv1), 159694848);
}
TEST_F(GpuHloCostAnalysisTest, ReduceWindowWithOverlapsRepeatedReads) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
add {
a0 = f32[] parameter(0)
a1 = f32[] parameter(1)
ROOT _ = f32[] add(a0, a1)
}
ENTRY entry {
p0 = f32[8,8] parameter(0)
c0 = f32[] constant(0)
ROOT _ = f32[3,4] reduce-window(p0, c0), window={size=4x5 stride=2x1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
int n_output_elements = 3 * 4;
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.flop_count(), 3 * n_output_elements * (4 * 5 - 1));
EXPECT_EQ(analysis_.bytes_accessed(),
sizeof(float) * (8 * 8 + 1 + n_output_elements));
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0),
sizeof(float) * n_output_elements * 4 * 5);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*root),
sizeof(float) * n_output_elements);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastWithRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
c1 = s8[] constant(0)
a1 = s8[] add(p1, c1)
b1 = s8[10000] broadcast(a1), dimensions={}
b2 = s8[10000] broadcast(c1), dimensions={}
ROOT r1 = s8[10000] add(b1, b2)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[10000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 10000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 * 10000);
EXPECT_EQ(analysis_.bytes_accessed(), 2 * 10000);
}
TEST_F(GpuHloCostAnalysisTest, WithoutRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
a1 = s8[] add(p1, p1)
b1 = s8[10000] broadcast(a1), dimensions={}
a2 = s8[10000] add(b1, b1)
slice1 = s8[8000] slice(a2), slice={[0:8000]}
slice2 = s8[8000] slice(a2), slice={[2000:10000]}
c = s8[10000] constant({...})
slicec1 = s8[8000] slice(c), slice={[0:8000]}
slicec2 = s8[8000] slice(c), slice={[2000:10000]}
a3 = s8[8000] add(slice1, slice2)
a4 = s8[8000] add(slicec1, slicec2)
ROOT a5 = s8[8000] add(a3, a4)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[8000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
options_.count_multiple_input_accesses = false;
GpuHloCostAnalysis analysis{options_};
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis));
EXPECT_EQ(analysis.output_bytes_accessed(*root), 8000);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis.bytes_accessed(*root), 1 + 8000 + 10000);
EXPECT_EQ(analysis.bytes_accessed(), 1 + 8000 + 10000);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastFlops) {
absl::string_view hlo_string = R"(
HloModule m
f {
i0 = f32[1024] iota(), iota_dimension=0
m0 = f32[1024] add(i0, i0)
s0 = f32[1024] multiply(m0, m0)
b0 = f32[1024,1024] broadcast(s0), dimensions={0}
ROOT r0 = f32[1024,1024] negate(b0)
}
ENTRY e {
ROOT r = f32[1024,1024] fusion(), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto n_elements = 1024 * 1024;
EXPECT_EQ(analysis_.output_bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(), n_elements * 4);
EXPECT_EQ(analysis_.flop_count(), n_elements * 3 * 3);
EXPECT_EQ(analysis_.IrSize(*root), 5);
}
TEST_F(GpuHloCostAnalysisTest, Slice) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100000000] parameter(0)
i1 = s8[100000000] iota(), iota_dimension=0
a1 = s8[100000000] add(p1, i1)
ROOT r1 = s8[1] slice(a1), slice={[0:1]}
}
ENTRY e {
p0 = s8[100000000] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.bytes_accessed(), 2);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, TwoSlices) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100] parameter(0)
i1 = s8[100] iota(), iota_dimension=0
a1 = s8[100] add(p1, i1)
slice1 = s8[1] slice(a1), slice={[0:1]}
slice2 = s8[1] slice(a1), slice={[3:4]}
ROOT r = s8[1] add(slice1, slice2)
}
ENTRY e {
p0 = s8[100] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 2);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3);
EXPECT_EQ(analysis_.bytes_accessed(), 3);
EXPECT_EQ(analysis_.IrSize(*root), 9);
}
TEST_F(GpuHloCostAnalysisTest, MultipleTrivialUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[] parameter(0)
m0 = s8[] multiply(p0, p0)
n0 = s8[] negate(p0)
ROOT a0 = s8[] add(m0, n0)
}
ENTRY e {
param0 = s8[] parameter(0)
ROOT r0 = s8[] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 1 + 1);
EXPECT_EQ(analysis_.bytes_accessed(), 1 + 1);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, MixedUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
n0 = s8[10] negate(p0)
m0 = s8[10] multiply(n0, n0)
a0 = s8[10] add(n0, n0)
s0 = s8[5] slice(a0), slice={[0:5]}
s1 = s8[2] slice(n0), slice={[4:6]}
n1 = s8[2] negate(s1)
ROOT c0 = s8[17] concatenate(s0, m0, n1), dimensions={0}
}
ENTRY e {
param0 = s8[10] parameter(0)
ROOT r0 = s8[17] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 17);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 17);
EXPECT_EQ(analysis_.bytes_accessed(*root), 17 + 17);
EXPECT_EQ(analysis_.bytes_accessed(), 17 + 17);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)), 3);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)),
analysis_.IrSize(*root->fused_parameter(0)->users()[0]));
EXPECT_EQ(analysis_.IrSize(*root), 12);
}
TEST_F(GpuHloCostAnalysisTest, FractionalUseRoundingUp) {
absl::string_view hlo_string = R"(
HloModule m
add_s8 {
lhs = s8[] parameter(0)
rhs = s8[] parameter(1)
ROOT add = s8[] add(lhs, rhs)
}
f {
p0 = s8[] parameter(0)
b0 = s8[10] broadcast(p0), dimensions={}
c0 = s8[] constant(0)
r0 = s8[] reduce(b0, c0), dimensions={0}, to_apply=add_s8
bitcast0 = s8[1] bitcast(r0)
i0 = s8[5] iota(), iota_dimension=0
cat0 = s8[6] concatenate(bitcast0, i0), dimensions={0}
p1 = s32[] parameter(1)
ROOT s0 = s8[2] dynamic-slice(cat0, p1), dynamic_slice_sizes={2}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = s32[] parameter(1)
ROOT r = s8[2] fusion(p0, p1), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 + 10 + 4);
EXPECT_EQ(analysis_.bytes_accessed(), 2 + 10 + 4);
}
TEST_F(GpuHloCostAnalysisTest, LargeConstant) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[1000] parameter(0)
c0 = s8[1000] constant({...})
ROOT a0 = s8[1000] add(p0, c0)
}
ENTRY e {
p0 = s8[1000] parameter(0)
ROOT r = s8[1000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3000);
EXPECT_EQ(analysis_.bytes_accessed(), 3000);
EXPECT_EQ(analysis_.IrSize(*root), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
ROOT _ = s8[3,1,1,1] negate(dus)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 3 - 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceNotUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
ROOT dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 0);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 1);
}
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseTwoParameters) {
const char* hlo_fusion_module_str = R"(
HloModule m
add {
p0 = s8[] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = s8[] add(p0, p1)
}
f {
p0 = s8[10] parameter(0)
p1 = s8[10] parameter(1)
a = s8[10] add(p0, p1)
c0 = s8[] constant(0)
r0 = s8[] reduce(a, c0), dimensions={0}, to_apply=add
c1 = s8[] constant(100)
r1 = s8[] reduce(a, c1), dimensions={0}, to_apply=add
ROOT _ = s8[] add(r0, r1)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[10] parameter(1)
ROOT _ = s8[] fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(fusion->fused_parameter(0),
fusion->fused_parameter(1)),
2.f);
}
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseParameterAndRoot) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
p1b = s8[10] broadcast(p1)
a = s8[10] add(p0, p1b)
ROOT _ = s8[10] negate(a)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = s8[10] fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(0), fusion->fused_expression_root()),
1.f);
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(1), fusion->fused_expression_root()),
0.f);
}
TEST_F(GpuHloCostAnalysisTest,
CommonElementwiseUseParameterAndRootMultiOutputFusion) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
p1b = s8[10] broadcast(p1)
a = s8[10] add(p0, p1b)
neg = s8[10] negate(a)
ROOT _ = (s8[10], s8[10]) tuple(a, neg)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = (s8[10], s8[10]) fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(0), fusion->fused_expression_root()),
1.f);
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(1), fusion->fused_expression_root()),
0.f);
}
TEST_F(GpuHloCostAnalysisTest, Reduce) {
absl::string_view hlo_string = R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
int64_t input_bytes_accessed = 4 * 32 * 40;
int64_t init_bytes_accessed = 4 * 32;
int64_t output_bytes_accessed = 4 * 32;
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), init_bytes_accessed);
EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
EXPECT_EQ(analysis_.bytes_accessed(*reduce),
input_bytes_accessed + init_bytes_accessed + output_bytes_accessed);
EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 3);
}
TEST_F(GpuHloCostAnalysisTest, VariadicReduce) {
absl::string_view hlo_string = R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
int64_t input_bytes_accessed = 4 * 32 * 40;
int64_t init_bytes_accessed = 4 * 32;
int64_t output_bytes_accessed = 2 * 4 * 32;
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 2), init_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 3), init_bytes_accessed);
EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
EXPECT_EQ(analysis_.bytes_accessed(*reduce), 2 * input_bytes_accessed +
2 * init_bytes_accessed +
output_bytes_accessed);
EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 6);
}
TEST_F(GpuHloCostAnalysisTest, CustomOpProfileIsUsed) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[10] parameter(0)
param_1 = f32[10] parameter(1)
param_2 = f32[10] parameter(2)
param_3 = f32[10] parameter(3)
tanh = f32[10] tanh(param_0)
mul = f32[10] multiply(tanh, param_1)
ROOT clamp = f32[10] clamp(mul, param_2, param_3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloOpProfiles::HloOpProfile hlo_op_profile;
const int kF32ClampFlopsPerElement = 7;
const int kF32MultiplyFlopsPerElement = 11;
const int kF32TanhFlopsPerElement = 13;
const int kNumElements = 10;
hlo_op_profile[{HloOpcode::kClamp, PrimitiveType::F32}] =
kF32ClampFlopsPerElement;
hlo_op_profile[{HloOpcode::kMultiply, PrimitiveType::F32}] =
kF32MultiplyFlopsPerElement;
hlo_op_profile[{HloOpcode::kTanh, PrimitiveType::F32}] =
kF32TanhFlopsPerElement;
GpuHloCostAnalysis analysis(options_, hlo_op_profile);
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis));
const HloInstruction* clamp = module->entry_computation()->root_instruction();
const HloInstruction* mul = clamp->operand(0);
const HloInstruction* tanh = mul->operand(0);
EXPECT_EQ(analysis.flop_count(*clamp),
kF32ClampFlopsPerElement * kNumElements);
EXPECT_EQ(analysis.flop_count(*mul),
kF32MultiplyFlopsPerElement * kNumElements);
EXPECT_EQ(analysis.flop_count(*tanh), kF32TanhFlopsPerElement * kNumElements);
}
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd417a41-9b87-49cf-8f25-5682c1ba589a | cpp | tensorflow/tensorflow | gpu_performance_model | third_party/xla/xla/service/gpu/model/gpu_performance_model.cc | third_party/xla/xla/service/gpu/model/gpu_performance_model_test.cc | #include "xla/service/gpu/model/gpu_performance_model.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/coalescing_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
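// Estimates the standalone runtime of one (possibly fused) instruction:
// compute time from its FLOP count and estimated launch dimensions, plus
// per-operand read time (with a coalescing/DRAM heuristic) and write time.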
EstimateRunTimeData
GpuPerformanceModel::EstimateRunTimeForInstruction(
const HloInstruction* instr, const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
VLOG(8) << "EstimateRunTimeForInstruction: " << instr->name();
int64_t flops = cost_analysis->flop_count(*instr);
int64_t bytes_written = cost_analysis->output_bytes_accessed(*instr);
std::optional<HloFusionAnalysis> local_analysis;
if (!config.fusion_analysis_cache) {
local_analysis = HloFusionAnalysis::Create(*instr, device_info);
}
const auto& fusion_analysis = config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*instr)
: local_analysis.value();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t num_blocks = launch_dimensions.num_blocks();
absl::Duration compute_time =
ComputeTime(device_info, flops, num_blocks,
launch_dimensions.num_threads_per_block());
CoalescingAnalysis coalescing_analysis(instr, instr->operands(),
fusion_analysis);
absl::Duration read_time;
int64_t bytes_read = 0;
for (const auto [operand_id, operand] : llvm::enumerate(instr->operands())) {
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t n_bytes_total =
GetOperandBytesAccessed(cost_analysis, instr, operand);
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
bool coalesced = coalescing_analysis.IsReadCoalesced(operand);
VLogOperandRead(operand, n_bytes_total, n_bytes_net, coalesced);
read_time += ReadTimeWithDRAMHeuristic(
device_info, num_blocks, n_bytes_net, n_bytes_total,
operand->shape().element_type(), coalesced);
}
absl::Duration write_time = WriteTime(device_info, bytes_written);
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, read_time + write_time, config);
EstimateRunTimeData runtime_data = {flops, bytes_read, bytes_written,
read_time, write_time, compute_time,
exec_time};
VLOG(3) << "Runtime data for HLO: " << instr->name() << "\n"
<< launch_dimensions.ToString() << "\n"
<< runtime_data.ToString();
return runtime_data;
}
EstimateRunTimeData
GpuPerformanceModel::EstimateRunTimeForInstructionCached(
const HloInstruction* instr, const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
if (config.gpu_performance_model_cache) {
if (auto cached_result = config.gpu_performance_model_cache->Get(*instr)) {
return *cached_result;
}
}
auto runtime_data =
EstimateRunTimeForInstruction(instr, device_info, cost_analysis, config);
if (config.gpu_performance_model_cache) {
config.gpu_performance_model_cache->Set(*instr, runtime_data);
}
return runtime_data;
}
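// Estimates the total runtime if the producer stays unfused: one kernel launch
// per consumer plus one for the producer, plus each consumer's cost of
// re-reading the producer's output from memory.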
absl::Duration GpuPerformanceModel::EstimateUnfusedExecTime(
const HloInstruction* producer, const EstimateRunTimeData& producer_runtime,
const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers) {
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
for (const HloInstruction* fused_consumer : fused_consumers) {
VLOG(8) << "Unfused consumer: " << fused_consumer->name();
float utilization_by_this_consumer =
GetOperandUtilization(cost_analysis, fused_consumer, producer);
std::optional<HloFusionAnalysis> local_analysis;
if (!config.fusion_analysis_cache) {
local_analysis = HloFusionAnalysis::Create(*fused_consumer, device_info);
}
const auto& analysis_unfused =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*fused_consumer)
: local_analysis.value();
LaunchDimensions launch_dimensions_unfused =
EstimateFusionLaunchDimensions(analysis_unfused);
int64_t n_bytes_total = std::llround(producer_runtime.bytes_written *
utilization_by_this_consumer);
int64_t n_bytes_net =
std::min(producer_runtime.bytes_written, n_bytes_total);
auto read_time_unfused =
ReadTime(device_info, launch_dimensions_unfused.num_blocks(),
n_bytes_net, n_bytes_total);
VLOG(10) << " Read time unfused: " << read_time_unfused;
time_unfused += read_time_unfused;
}
return time_unfused;
}
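// Estimates the runtime of fusing `producer` into `consumer`: combines their
// FLOPs (producer FLOPs scaled by operand utilization), re-derives launch
// dimensions for the fused kernel, and recomputes read time over the fusion's
// parameters; returns infinity if either input estimate is infinite.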
absl::Duration GpuPerformanceModel::EstimateRunTimeForFusion(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
VLOG(8) << "EstimateRunTimeForFusion, producer: " << producer->name()
<< " consumer: " << consumer->name();
if (producer_runtime.IsInfinite() || consumer_runtime.IsInfinite()) {
return absl::InfiniteDuration();
}
float utilization_by_this_consumer = 0;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i) == producer ||
(consumer->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
consumer->operand(i)->operand(0) == producer)) {
utilization_by_this_consumer +=
cost_analysis->operand_utilization(*consumer, i);
}
}
std::optional<HloFusionAnalysis> local_analysis_fused;
if (!config.fusion_analysis_cache) {
local_analysis_fused =
HloFusionAnalysis::Create(*producer, *consumer, device_info);
}
const auto& fusion_analysis =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*producer, *consumer)
: local_analysis_fused.value();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t flops = producer_runtime.flops * utilization_by_this_consumer +
consumer_runtime.flops;
absl::Duration compute_time =
ComputeTime(device_info, flops, launch_dimensions.num_blocks(),
launch_dimensions.num_threads_per_block());
auto fusion_operands = fusion_analysis.fusion().GetParameters();
CoalescingAnalysis coalescing_analysis(producer, consumer, fusion_operands,
fusion_analysis);
absl::Duration read_time;
int64_t bytes_read = 0;
for (const auto* operand : fusion_operands) {
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t n_bytes_total = GetSharedOperandBytesAccessed(
cost_analysis, producer, consumer, operand);
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
bool coalesced = coalescing_analysis.IsReadCoalesced(operand);
VLogOperandRead(operand, n_bytes_total, n_bytes_net, coalesced);
read_time += ReadTimeWithDRAMHeuristic(
device_info, launch_dimensions.num_blocks(), n_bytes_net, n_bytes_total,
operand->shape().element_type(), coalesced);
}
auto exec_time = CombineComputeAndMemoryAccessTime(
compute_time, read_time + consumer_runtime.write_time, config);
VLOG(3) << "Runtime data for producer-consumer fusion:\n"
<< " producer: " << producer->name() << "\n"
<< " consumer: " << consumer->name() << "\n"
<< launch_dimensions.ToString() << "\n"
<< EstimateRunTimeData{flops,
bytes_read,
consumer_runtime.bytes_written,
read_time,
consumer_runtime.write_time,
compute_time,
exec_time}
.ToString();
return exec_time;
}
absl::Duration GpuPerformanceModel::EstimateRunTimeForFusionCached(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
if (config.gpu_performance_model_cache) {
if (auto fusion_runtime =
config.gpu_performance_model_cache->Get(*producer, *consumer)) {
return *fusion_runtime;
}
}
auto fusion_runtime = EstimateRunTimeForFusion(
producer, consumer, producer_runtime, consumer_runtime, device_info,
cost_analysis, config);
if (config.gpu_performance_model_cache) {
config.gpu_performance_model_cache->Set(*producer, *consumer,
fusion_runtime);
}
return fusion_runtime;
}
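// Estimates the total runtime when the producer is fused into every consumer:
// each consumer re-runs the producer's compute and input reads; with
// multi-output fusion the producer's output write is still paid once.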
absl::Duration GpuPerformanceModel::EstimateFusedExecTime(
const HloInstruction* producer, const EstimateRunTimeData& producer_runtime,
const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
absl::Duration exec_time_fused =
kKernelLaunchOverhead * fused_consumers.size();
for (auto [idx, fused_consumer] : llvm::enumerate(fused_consumers)) {
VLOG(8) << "Fused consumer: " << fused_consumer->name();
float utilization_by_this_consumer = cost_analysis->operand_utilization(
*fused_consumer, fused_consumer->operand_index(producer));
std::optional<HloFusionAnalysis> local_analysis_fused;
if (!config.fusion_analysis_cache) {
local_analysis_fused =
HloFusionAnalysis::Create(*producer, *fused_consumer, device_info);
}
const auto& analysis_fused =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*producer, *fused_consumer)
: local_analysis_fused.value();
LaunchDimensions launch_dimensions_fused =
EstimateFusionLaunchDimensions(analysis_fused);
absl::Duration compute_time_by_this_consumer = ComputeTime(
device_info, producer_runtime.flops * utilization_by_this_consumer,
launch_dimensions_fused.num_blocks(),
launch_dimensions_fused.num_threads_per_block());
absl::Duration input_access_time_by_this_consumer = ProducerInputAccessTime(
cost_analysis, device_info, launch_dimensions_fused.num_blocks(),
producer, analysis_fused, config, fused_consumer);
VLOG(10) << " Compute time by consumer: " << compute_time_by_this_consumer;
VLOG(10) << " Input access time by consumer: "
<< input_access_time_by_this_consumer;
exec_time_fused += CombineComputeAndMemoryAccessTime(
compute_time_by_this_consumer, input_access_time_by_this_consumer,
config);
}
if (multi_output) {
exec_time_fused += producer_runtime.write_time;
}
return exec_time_fused;
}
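// Priority-fusion variant: producer and consumer runtimes must already be
// present in the performance model cache (CHECK-ed); returns both the unfused
// and fused total times over all consumers.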
GpuPerformanceModel::RunTimes
GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
const HloInstruction* producer, const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
auto cache_result = config.gpu_performance_model_cache->Get(*producer);
CHECK(cache_result.has_value());
EstimateRunTimeData producer_runtime = *cache_result;
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
absl::Duration time_fused = kKernelLaunchOverhead * fused_consumers.size();
for (auto fused_consumer : fused_consumers) {
VLOG(8) << "Fused consumer: " << fused_consumer->name();
auto cache_result =
config.gpu_performance_model_cache->Get(*fused_consumer);
CHECK(cache_result.has_value());
EstimateRunTimeData consumer_runtime = *cache_result;
time_unfused += consumer_runtime.exec_time;
time_fused += EstimateRunTimeForFusionCached(
producer, fused_consumer, producer_runtime, consumer_runtime,
device_info, cost_analysis, config);
}
if (multi_output) {
time_fused += producer_runtime.write_time;
}
if (VLOG_IS_ON(8)) {
LOG(INFO) << "Consumer count: " << fused_consumers.size();
LOG(INFO) << "Unfused time: " << time_unfused;
LOG(INFO) << "Fused time: " << time_fused;
}
return {time_unfused, time_fused};
}
GpuPerformanceModel::RunTimes GpuPerformanceModel::EstimateRunTimes(
const HloInstruction* producer, const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
VLOG(8) << "Producer: " << producer->name();
if (producer->opcode() == HloOpcode::kFusion) {
VLOG(10) << producer->fused_instructions_computation()->ToString();
}
EstimateRunTimeData producer_runtime = EstimateRunTimeForInstructionCached(
producer, device_info, cost_analysis, config);
absl::Duration time_unfused =
EstimateUnfusedExecTime(producer, producer_runtime, device_info,
cost_analysis, config, fused_consumers);
absl::Duration time_fused = EstimateFusedExecTime(
producer, producer_runtime, device_info, cost_analysis, config,
fused_consumers, multi_output);
if (VLOG_IS_ON(8)) {
LOG(INFO) << "Consumer count: " << fused_consumers.size();
LOG(INFO) << "Unfused time: " << time_unfused;
LOG(INFO) << "Fused time: " << time_fused;
}
return {time_unfused, time_fused};
}
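// Writes the estimated runtime of a fusion instruction back into its
// GpuBackendConfig as reification cost (cycles plus compute/memory/exec
// times in microseconds).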
void GpuPerformanceModel::RecordEstimatedRunTime(
HloInstruction* instruction, const se::DeviceDescription& device_info,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
DCHECK(Cast<const HloFusionInstruction>(instruction)) << "expected fusion";
DCHECK(cost_analysis != nullptr) << "expected cost analysis";
EstimateRunTimeData data = EstimateRunTimeForInstructionCached(
instruction, device_info, cost_analysis, config);
double cycles =
absl::ToDoubleNanoseconds(data.exec_time) * device_info.clock_rate_ghz();
auto gpu_config = instruction->backend_config<GpuBackendConfig>();
TF_CHECK_OK(gpu_config.status()) << instruction->ToString();
auto reification_cost =
gpu_config->mutable_fusion_backend_config()->mutable_reification_cost();
reification_cost->set_end_to_end_cycles(cycles);
reification_cost->set_compute_time_us(
absl::ToDoubleMicroseconds(data.compute_time));
reification_cost->set_memory_access_time_us(
absl::ToDoubleMicroseconds(data.read_time + data.write_time));
reification_cost->set_exec_time_us(
absl::ToDoubleMicroseconds(data.exec_time));
TF_CHECK_OK(instruction->set_backend_config(*gpu_config));
VLOG(8) << "RecordEstimatedRunTime: " << instruction->ToString();
}
}  // namespace gpu
} | #include "xla/service/gpu/model/gpu_performance_model.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
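// Fixture that evaluates GpuPerformanceModel on an RTX A6000 device
// description, with helpers for the default estimator and the priority-fusion
// estimator (which pre-populates the performance model cache).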
class GpuPerformanceModelTest : public HloTestBase {
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuPerformanceModel::RunTimes EstimateRunTimesDefault(
const HloInstruction* producer,
std::vector<HloInstruction*> fused_consumers = {}) {
return GpuPerformanceModel::EstimateRunTimes(
producer, device_info_, &analysis_,
GpuPerformanceModelOptions::Default(), fused_consumers);
}
GpuPerformanceModel::RunTimes EstimateRunTimesForPriorityFusion(
const HloInstruction* producer,
std::vector<HloInstruction*> fused_consumers = {}) {
auto config = GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_);
auto runtime_data = GpuPerformanceModel::EstimateRunTimeForInstruction(
producer, device_info_, &analysis_, config);
gpu_performance_model_cache_.Set(*producer, runtime_data);
for (auto consumer : fused_consumers) {
auto runtime_data = GpuPerformanceModel::EstimateRunTimeForInstruction(
consumer, device_info_, &analysis_, config);
gpu_performance_model_cache_.Set(*consumer, runtime_data);
}
return GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
producer, device_info_, &analysis_, config, fused_consumers);
}
mlir::MLIRContext mlir_context_;
GpuHloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
{},
true};
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HloFusionAnalysisCache fusion_analysis_cache_{device_info_};
GpuHloCostAnalysis analysis_{options_, device_info_};
GpuPerformanceModelCache gpu_performance_model_cache_;
GpuPerformanceModelWithIndexingAnalysis indexing_cost_model_{
&device_info_, &fusion_analysis_cache_, ShapeSizeBytesFunction(),
&mlir_context_};
GpuPerformanceModelTest() : HloTestBase() {}
};
TEST_F(GpuPerformanceModelTest, LargeWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
c0 = f32[] constant(0)
ROOT b0 = f32[10000000] broadcast(c0)
}
ENTRY e {
ROOT r.1 = f32[10000000] fusion(), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 53, 10);
auto prio_t = EstimateRunTimesForPriorityFusion(root);
EXPECT_NEAR(absl::ToInt64Microseconds(prio_t.time_unfused), 53, 10);
auto indexing_t = indexing_cost_model_.EstimateRunTimes(root);
EXPECT_NEAR(absl::ToInt64Microseconds(indexing_t.time_unfused), 53, 10);
}
TEST_F(GpuPerformanceModelTest, SmallReadWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[1000] parameter(0)
p1 = f32[1000] parameter(1)
ROOT b0 = f32[1000] add(p0, p1)
}
ENTRY e {
p0 = f32[1000] parameter(0)
p1 = f32[1000] parameter(1)
ROOT r.1 = f32[1000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 1, 1);
GpuPerformanceModel::RecordEstimatedRunTime(
root, device_info_, &analysis_, GpuPerformanceModelOptions::Default());
auto reification_cost = root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.reification_cost();
EXPECT_NEAR(reification_cost.end_to_end_cycles(), 38.4, 0.1);
EXPECT_NEAR(reification_cost.exec_time_us(), 0, 1);
auto indexing_t = indexing_cost_model_.EstimateRunTimes(root);
EXPECT_NEAR(absl::ToInt64Microseconds(indexing_t.time_unfused), 1, 1);
}
TEST_F(GpuPerformanceModelTest, LargeReadWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(p0, p1)
}
ENTRY e {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 175, 30);
GpuPerformanceModel::RecordEstimatedRunTime(
root, device_info_, &analysis_, GpuPerformanceModelOptions::Default());
auto reification_cost = root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.reification_cost();
EXPECT_NEAR(reification_cost.end_to_end_cycles(), 220284, 100);
EXPECT_NEAR(reification_cost.exec_time_us(), 156, 10);
EXPECT_NEAR(reification_cost.compute_time_us(), 1, 1);
EXPECT_NEAR(reification_cost.memory_access_time_us(), 156, 10);
}
TEST_F(GpuPerformanceModelTest, L1CacheEffect) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[10000] parameter(0)
bc0 = f32[10000,1000] broadcast(p0), dimensions={0}
b0 = f32[10000000] bitcast(bc0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(b0, p1)
}
ENTRY e {
p0 = f32[10000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 118, 12);
}
TEST_F(GpuPerformanceModelTest, L2CacheEffect) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[1000000] parameter(0)
bc0 = f32[1000000,10] broadcast(p0), dimensions={0}
b0 = f32[10000000] bitcast(bc0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(b0, p1)
}
ENTRY e {
p0 = f32[1000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 123, 12);
}
TEST_F(GpuPerformanceModelTest, UnusedParameter) {
Shape shape = ShapeUtil::MakeShape(F32, {100000});
auto module = std::make_unique<HloModule>("m", HloModuleConfig{});
HloComputation::Builder b("b");
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloComputation::Builder sub_builder("subcomp");
HloInstruction* p0f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0f"));
HloInstruction* p1f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1f"));
ASSERT_NE(p1f, nullptr);
sub_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0f));
HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build());
auto fusion = HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0, p1}, subcomp);
b.AddInstruction(std::move(fusion));
module->AddEntryComputation(b.Build());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 1, 1);
}
TEST_F(GpuPerformanceModelTest, ComputeBoundReducesWithSameLaunchDimensions) {
absl::string_view small_large_reduce_hlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
log0 = f32[] log(p0)
log1 = f32[] log(log0)
log2 = f32[] log(log1)
log3 = f32[] log(log2)
log4 = f32[] log(log3)
ROOT max = f32[] maximum(log4, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[150,32,128] parameter(0)
reduce.1 = f32[150,32] reduce(p0, c), dimensions={2}, to_apply=max
ROOT reduce.2 = f32[150] reduce(reduce.1, c), dimensions={1}, to_apply=max
}
)";
absl::string_view large_small_reduce_hlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
log0 = f32[] log(p0)
log1 = f32[] log(log0)
log2 = f32[] log(log1)
log3 = f32[] log(log2)
log4 = f32[] log(log3)
ROOT max = f32[] maximum(log4, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[150,128,32] parameter(0)
reduce.1 = f32[150,128] reduce(p0, c), dimensions={2}, to_apply=max
ROOT reduce.2 = f32[150] reduce(reduce.1, c), dimensions={1}, to_apply=max
}
)";
auto run = [&](absl::string_view hlo_text)
-> absl::StatusOr<GpuPerformanceModel::RunTimes> {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GpuHloCostAnalysis analysis(options_, device_info_);
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&analysis));
auto* producer =
module->entry_computation()->GetInstructionWithName("reduce.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.2")};
return EstimateRunTimesDefault(producer, consumers);
};
TF_ASSERT_OK_AND_ASSIGN(auto large_small_reduce_runtime,
run(small_large_reduce_hlo));
TF_ASSERT_OK_AND_ASSIGN(auto small_large_reduce_runtime,
run(large_small_reduce_hlo));
EXPECT_NEAR(absl::ToInt64Microseconds(large_small_reduce_runtime.time_fused),
absl::ToInt64Microseconds(small_large_reduce_runtime.time_fused),
2);
}
TEST_F(GpuPerformanceModelTest, FusingTransposeIntoReduceIsSlow) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128] parameter(0)
transpose.1 = f32[1500,128,32] transpose(p0), dimensions={0,2,1}
ROOT reduce.1 = f32[1500,32] reduce(transpose.1, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("transpose.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 105, 10);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_fused), 514, 10);
}
TEST_F(GpuPerformanceModelTest,
FusingTransposeMultiOutputFusionIntoReduceIsSlow) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
transpose_fusion {
param0 = f32[1500,32,128] parameter(0)
transpose.1 = f32[1500,128,32] transpose(param0), dimensions={0,2,1}
ROOT res = (f32[1500,128,32]) tuple(transpose.1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128] parameter(0)
fusion = (f32[1500,128,32]) fusion(p0), kind=kInput, calls=transpose_fusion
gte = f32[1500,128,32] get-tuple-element(fusion), index=0
ROOT reduce.1 = f32[1500,32] reduce(gte, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("fusion");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 105, 10);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_fused), 514, 10);
}
TEST_F(GpuPerformanceModelTest, FusingNonMinorTransposeIntoReduceIsFast) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128]{1,2,0} parameter(0)
transpose.1 = f32[1500,128,32]{2,0,1} transpose(p0), dimensions={0,2,1}
ROOT reduce.1 = f32[1500,32] reduce(transpose.1, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("transpose.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesDefault(producer, consumers);
EXPECT_LT(t.time_fused, t.time_unfused);
auto prio_t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_LT(prio_t.time_fused, prio_t.time_unfused);
}
TEST_F(GpuPerformanceModelTest, DusScalesWithUpdates) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
fusion.1 {
p0 = f32[1073741824] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
c0 = f32[] constant(0)
r = f32[1024] reduce(p1, c0), dimensions={1}, to_apply=max
ROOT dus.1 = f32[1073741824] dynamic-update-slice(p0, r, p2)
}
fusion.2 {
p0 = f32[1024] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
c0 = f32[] constant(0)
r = f32[1024] reduce(p1, c0), dimensions={1}, to_apply=max
ROOT dus.1 = f32[1024] dynamic-update-slice(p0, r, p2)
}
ENTRY main {
p0 = f32[1073741824] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[1024] parameter(3)
dus1 = f32[1073741824] fusion(p0, p1, p2), kind=kInput, calls=fusion.1
dus2 = f32[1024] fusion(p3, p1, p2), kind=kInput, calls=fusion.2
ROOT tuple = (f32[1073741824], f32[1024]) tuple(dus1, dus2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* operand0 = module->entry_computation()->root_instruction()->operand(0);
auto* operand1 = module->entry_computation()->root_instruction()->operand(1);
auto t1 = EstimateRunTimesDefault(operand0);
auto t2 = EstimateRunTimesDefault(operand1);
EXPECT_NEAR(absl::ToInt64Microseconds(t1.time_unfused),
absl::ToInt64Microseconds(t2.time_unfused), 10);
auto prio_t1 = EstimateRunTimesForPriorityFusion(operand0);
auto prio_t2 = EstimateRunTimesForPriorityFusion(operand1);
EXPECT_NEAR(absl::ToInt64Microseconds(prio_t1.time_unfused),
absl::ToInt64Microseconds(prio_t2.time_unfused), 10);
}
TEST_F(GpuPerformanceModelTest, EqualCostBeforeAndAfterFusion) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
ROOT tmp_3 = f32[4194304] multiply(f32[4194304] p0, f32[4194304] p1)
}
e1 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
f.1 = f32[4194304] fusion(f32[4194304] p0, f32[4194304] p1), kind=kLoop, calls=f1
ROOT r.1 = f32[4194304] tanh(f32[4194304] f.1)
}
f2 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
mul = f32[4194304] multiply(f32[4194304] p0, f32[4194304] p1)
ROOT res = f32[4194304] tanh(f32[4194304] mul)
}
ENTRY e2 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
ROOT f.2 = f32[4194304] fusion(f32[4194304] p0, f32[4194304] p1), kind=kLoop, calls=f2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation_without_fusion =
module->GetComputationWithName("e1");
ASSERT_IS_OK(computation_without_fusion->Accept(&analysis_));
HloInstruction* consumer = computation_without_fusion->root_instruction();
const HloInstruction* producer = consumer->operand(0);
auto t1 = EstimateRunTimesForPriorityFusion(producer, {consumer});
HloComputation* computation_with_fusion =
module->GetComputationWithName("e2");
ASSERT_IS_OK(computation_with_fusion->Accept(&analysis_));
HloInstruction* root_with_fusion =
computation_with_fusion->root_instruction();
auto t2 = EstimateRunTimesForPriorityFusion(root_with_fusion);
EXPECT_EQ(t1.time_fused, t2.time_unfused);
}
TEST_F(GpuPerformanceModelTest, DoNotFuseDivideIntoSmallReduce) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY fusion {
c = f32[] constant(0)
p0 = f32[3072] parameter(0)
p1 = f32[] parameter(1)
reduce = f32[] reduce(p0, c), dimensions={0}, to_apply=add
ROOT divide = f32[] divide(reduce, p1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("reduce");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("divide")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_LT(t.time_unfused, t.time_fused);
}
TEST_F(GpuPerformanceModelTest, PreferFusingExpensiveInstructionsIntoProducer) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation.0 {
p0 = f32[4,8,8] parameter(0)
bc = f32[1,4,1424,8,8] broadcast(p0), dimensions={1,3,4}
p1 = f32[1,4,1424,8,8] parameter(1)
ROOT sub = f32[1,4,1424,8,8] subtract(bc, p1)
}
fused_computation.1 {
p0 = f32[1,4,1424,8,8] parameter(0)
bc = f32[4,1424,8,8] bitcast(p0)
c0 = f32[] constant(0)
ROOT reduce = f32[4,8,8] reduce(bc, c0), to_apply=add, dimensions={1}
}
ENTRY fusion {
p0 = f32[4,8,8] parameter(0)
p1 = f32[1,4,1424,8,8] parameter(1)
fusion.0 = f32[1,4,1424,8,8] fusion(p0, p1), kind=kLoop, calls=fused_computation.0
exp = f32[1,4,1424,8,8] exponential(fusion.0)
ROOT fusion.1 = f32[4,8,8] fusion(exp), kind=kInput, calls=fused_computation.1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* fusion_0 =
module->entry_computation()->GetInstructionWithName("fusion.0");
auto* exp = module->entry_computation()->GetInstructionWithName("exp");
auto exp_consumer_runtimes =
EstimateRunTimesForPriorityFusion(fusion_0, {exp});
auto exp_producer_runtimes =
EstimateRunTimesForPriorityFusion(exp, exp->users());
auto exp_consumer_priority =
exp_consumer_runtimes.time_unfused - exp_consumer_runtimes.time_fused;
auto exp_producer_priority =
exp_producer_runtimes.time_unfused - exp_producer_runtimes.time_fused;
EXPECT_LT(exp_producer_priority, exp_consumer_priority);
}
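// The expensive elementwise fusion should not be fused into the small
// reduction: the unfused estimate must stay below the fused one.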
TEST_F(GpuPerformanceModelTest, DontFuseExpensiveElementwiseIntoSmallReduce) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation.0 {
p0 = f32[4,256,32] parameter(0)
tanh = f32[4,256,32] tanh(p0)
c1 = f32[] constant(72)
broadcast = f32[4,256, 32] broadcast(c1), dimensions={}
ROOT mul = f32[4,256,32] multiply(tanh, broadcast)
}
ENTRY fusion {
p0 = f32[4,256,32] parameter(0)
fusion = f32[4,256,32] fusion(p0), kind=kLoop, calls=fused_computation.0
c0 = f32[] constant(0)
ROOT reduce = f32[4,32] reduce(fusion, c0), to_apply=add, dimensions={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* fusion = module->entry_computation()->GetInstructionWithName("fusion");
auto* reduce = module->entry_computation()->GetInstructionWithName("reduce");
auto t = EstimateRunTimesForPriorityFusion(fusion, {reduce});
EXPECT_LT(t.time_unfused, t.time_fused);
}
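// A producer with an infinite runtime estimate must yield an infinite fusion
// estimate.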
TEST_F(GpuPerformanceModelTest,
EstimateRunTimeForFusion_InfiniteProducer_ReturnsInfinite) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule testmodule
ENTRY fusion {
p0 = f32[32] parameter(0)
exp = f32[32] exponential(p0)
ROOT add = f32[32] add(p0, exp)
})"));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer = module->entry_computation()->GetInstructionWithName("exp");
auto* consumer = module->entry_computation()->GetInstructionWithName("add");
auto config = GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_);
auto producer_runtime = EstimateRunTimeData::Infinite();
gpu_performance_model_cache_.Set(*producer, producer_runtime);
auto consumer_runtime = GpuPerformanceModel::EstimateRunTimeForInstruction(
consumer, device_info_, &analysis_, config);
auto result = GpuPerformanceModel::EstimateRunTimeForFusion(
producer, consumer, producer_runtime, consumer_runtime, device_info_,
&analysis_, config);
EXPECT_EQ(result, absl::InfiniteDuration());
}
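// Likewise, an infinite consumer runtime estimate must yield an infinite
// fusion estimate.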
TEST_F(GpuPerformanceModelTest,
EstimateRunTimeForFusion_InfiniteConsumer_ReturnsInfinite) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule testmodule
ENTRY fusion {
p0 = f32[32] parameter(0)
exp = f32[32] exponential(p0)
ROOT add = f32[32] add(p0, exp)
})"));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer = module->entry_computation()->GetInstructionWithName("exp");
auto* consumer = module->entry_computation()->GetInstructionWithName("add");
auto config = GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_);
auto producer_runtime = GpuPerformanceModel::EstimateRunTimeForInstruction(
producer, device_info_, &analysis_, config);
auto consumer_runtime = EstimateRunTimeData::Infinite();
gpu_performance_model_cache_.Set(*producer, consumer_runtime);
auto result = GpuPerformanceModel::EstimateRunTimeForFusion(
producer, consumer, producer_runtime, consumer_runtime, device_info_,
&analysis_, config);
EXPECT_EQ(result, absl::InfiniteDuration());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf625149-027c-4753-b82c-cd10c5a4c99b | cpp | tensorflow/tensorflow | affine_map_evaluator | third_party/xla/xla/service/gpu/model/affine_map_evaluator.cc | third_party/xla/xla/service/gpu/model/affine_map_evaluator_test.cc | #include "xla/service/gpu/model/affine_map_evaluator.h"
#include <cstdint>
#include <vector>
#include "absl/types/span.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/LLVM.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
}
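// Recursively evaluates `expr`, substituting `dim_values` for dimension ids
// and `symbol_values` for symbol ids. Add, mul, floordiv (signed floor
// semantics) and mod are supported; any other binary op is a fatal error.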
int64_t EvaluateAffineExpr(AffineExpr expr,
absl::Span<int64_t const> dim_values,
absl::Span<int64_t const> symbol_values) {
AffineExprKind kind = expr.getKind();
if (kind == AffineExprKind::Constant) {
return mlir::cast<AffineConstantExpr>(expr).getValue();
}
if (kind == AffineExprKind::DimId) {
return dim_values[mlir::cast<AffineDimExpr>(expr).getPosition()];
}
if (kind == AffineExprKind::SymbolId) {
return symbol_values[mlir::cast<AffineSymbolExpr>(expr).getPosition()];
}
auto binary_expr = mlir::cast<AffineBinaryOpExpr>(expr);
int64_t lhs =
EvaluateAffineExpr(binary_expr.getLHS(), dim_values, symbol_values);
int64_t rhs =
EvaluateAffineExpr(binary_expr.getRHS(), dim_values, symbol_values);
switch (kind) {
case AffineExprKind::Add:
return lhs + rhs;
case AffineExprKind::Mul:
return lhs * rhs;
case AffineExprKind::FloorDiv:
return llvm::divideFloorSigned(lhs, rhs);
case AffineExprKind::Mod:
return lhs % rhs;
default:
LOG(FATAL) << "Unsupported expression";
}
}
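// Evaluates every result of `affine_map` at the given point. The number of
// dimension and symbol values must match the map (CHECK-ed).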
SmallVector<int64_t> EvaluateAffineMap(
AffineMap affine_map, absl::Span<int64_t const> dim_values,
absl::Span<int64_t const> symbol_values) {
CHECK_EQ(affine_map.getNumDims(), dim_values.size());
CHECK_EQ(affine_map.getNumSymbols(), symbol_values.size());
SmallVector<int64_t> results;
results.reserve(affine_map.getNumResults());
for (auto expr : affine_map.getResults()) {
results.push_back(EvaluateAffineExpr(expr, dim_values, symbol_values));
}
return results;
}
}
} | #include "xla/service/gpu/model/affine_map_evaluator.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::AffineMap;
using ::mlir::bindDims;
using ::mlir::bindSymbols;
using ::testing::ElementsAre;
class AffineMapEvaluator : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
};
TEST_F(AffineMapEvaluator, EvaluateMap) {
AffineExpr d0, d1, s0, s1;
bindDims(&mlir_context_, d0, d1);
bindSymbols(&mlir_context_, s0, s1);
auto affine_map =
AffineMap::get(2, 2, {d0 + d1.floorDiv(8), s0 + s1 % 16}, &mlir_context_);
auto res = EvaluateAffineMap(affine_map, {1, 2},
{3, 4});
EXPECT_THAT(res, ElementsAre(1, 7));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/affine_map_evaluator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/affine_map_evaluator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c5dbbb4-0726-4024-a9c7-cec6368a1eba | cpp | tensorflow/tensorflow | symbolic_tile | third_party/xla/xla/service/gpu/model/symbolic_tile.cc | third_party/xla/xla/service/gpu/model/symbolic_tile_test.cc | #include "xla/service/gpu/model/symbolic_tile.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
namespace xla {
namespace gpu {
namespace {
using ::llvm::SmallVector;
using ::mlir::AffineConstantExpr;
using ::mlir::AffineDimExpr;
using ::mlir::AffineExpr;
using ::mlir::AffineExprKind;
using ::mlir::AffineMap;
using ::mlir::AffineSymbolExpr;
using ::mlir::getAffineConstantExpr;
using ::mlir::getAffineDimExpr;
using ::mlir::MLIRContext;
using Constraint = ConstraintExpression::Constraint;
using ConjointConstraints = ConstraintExpression::ConjointConstraints;
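// Substitutes `value` for every dimension and for the first `num_range_vars`
// symbols of `affine_map`, then simplifies. Symbols past `num_range_vars`
// (runtime variables) are left untouched.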
AffineMap SubstituteAllIndicesAndRangeVarSymbolsWithSameValue(
AffineMap affine_map, AffineExpr value, int num_range_vars) {
CHECK_LE(num_range_vars, affine_map.getNumSymbols());
MLIRContext* mlir_context = affine_map.getContext();
int64_t num_dims = affine_map.getNumDims();
int64_t num_symbols = affine_map.getNumSymbols();
llvm::DenseMap<AffineExpr, AffineExpr> indices;
for (int64_t i = 0; i < num_dims; ++i) {
indices[getAffineDimExpr(i, mlir_context)] = value;
}
for (int64_t i = 0; i < num_range_vars; ++i) {
indices[getAffineSymbolExpr(i, mlir_context)] = value;
}
return simplifyAffineMap(affine_map.replace(indices, num_dims, num_symbols));
}
struct SizeAndStrideExpression {
AffineExpr size;
AffineExpr stride;
ConstraintExpression constraints;
SizeAndStrideExpression(
AffineExpr size, AffineExpr stride,
ConstraintExpression constraints = ConstraintExpression())
: size(std::move(size)),
stride(std::move(stride)),
constraints(std::move(constraints)) {}
};
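// Handles expressions of the form `d mod c` with constant `c`: the stride is
// 1, and the derived size is only exact when `d` divides `c` or `c` divides
// `d`, which is recorded as a constraint.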
std::optional<SizeAndStrideExpression> ExtractSizeAndStrideFromMod(
AffineExpr lhs, AffineExpr modulus) {
CHECK(modulus.getKind() == AffineExprKind::Constant);
if (auto tile_size_expr = llvm::dyn_cast<mlir::AffineDimExpr>(lhs)) {
AffineExpr size = tile_size_expr -
mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
tile_size_expr - 1, modulus) *
modulus;
Interval zero_interval{0, 0};
ConstraintExpression constraints;
constraints.And(
{{tile_size_expr % modulus, zero_interval}});
constraints.Or(
{{modulus % tile_size_expr, zero_interval}});
return SizeAndStrideExpression(
size, getAffineConstantExpr(1, lhs.getContext()),
std::move(constraints));
}
return std::nullopt;
}
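// Handles expressions of the form `d floordiv c` with constant `c`: the size
// is ceil(d / c) and the stride is 1.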
std::optional<SizeAndStrideExpression> ExtractSizeAndStrideFromFloorDiv(
AffineExpr num, AffineExpr den) {
if (den.getKind() != AffineExprKind::Constant) {
return std::nullopt;
}
if (auto dim_expr = llvm::dyn_cast<mlir::AffineDimExpr>(num)) {
AffineExpr size = mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
dim_expr + (den - 1), den);
return SizeAndStrideExpression(
size, getAffineConstantExpr(1, num.getContext()));
}
return std::nullopt;
}
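// Recursively flattens a (possibly nested) sum into the list of its summands.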
void DestructureSummationImpl(AffineExpr expr,
std::vector<AffineExpr>& summands) {
switch (expr.getKind()) {
case AffineExprKind::Add: {
const auto add = llvm::cast<mlir::AffineBinaryOpExpr>(expr);
DestructureSummationImpl(add.getLHS(), summands);
DestructureSummationImpl(add.getRHS(), summands);
break;
}
default:
summands.push_back(expr);
break;
}
}
std::vector<AffineExpr> DestructureSummation(AffineExpr expr) {
std::vector<AffineExpr> summands;
DestructureSummationImpl(expr, summands);
return summands;
}
std::optional<SizeAndStrideExpression> ExtractSizeAndStride(
AffineExpr strided_indexing, absl::Span<Interval const> dimension_intervals,
absl::Span<Interval const> symbol_intervals);
std::optional<std::vector<SizeAndStrideExpression>>
ExtractSizesAndStridesFromMultivariateSummation(
AffineExpr summation, absl::Span<Interval const> dimension_intervals,
absl::Span<Interval const> symbol_intervals) {
std::vector<AffineExpr> summands = DestructureSummation(summation);
std::vector<SizeAndStrideExpression> sizes_and_strides;
sizes_and_strides.reserve(summands.size());
for (AffineExpr summand : summands) {
std::optional<SizeAndStrideExpression> maybe_size_and_stride =
ExtractSizeAndStride(summand, dimension_intervals, symbol_intervals);
if (!maybe_size_and_stride.has_value()) {
VLOG(1) << "Couldn't extract size and stride from " << ToString(summand);
return std::nullopt;
}
sizes_and_strides.push_back(*maybe_size_and_stride);
}
return sizes_and_strides;
}
AffineExpr CombineSizes(
absl::Span<SizeAndStrideExpression const> sizes_and_strides) {
CHECK(!sizes_and_strides.empty());
AffineExpr product =
getAffineConstantExpr(1, sizes_and_strides[0].size.getContext());
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
product = product * size_and_stride.size;
}
return product;
}
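// Builds an affine "select": the result evaluates to `true_expr` when
// `eq_param != 1` and to `false_expr` when `eq_param == 1`, assuming
// 1 <= eq_param <= eq_param_inclusive_upper_bound.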
AffineExpr IfNeqOne(AffineExpr eq_param, AffineExpr true_expr,
AffineExpr false_expr,
int64_t eq_param_inclusive_upper_bound) {
AffineExpr b = getAffineConstantExpr(eq_param_inclusive_upper_bound,
eq_param.getContext());
AffineExpr condition = mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
b + 1 - eq_param, b);
return condition * false_expr + (1 - condition) * true_expr;
}
void SortByStride(std::vector<SizeAndStrideExpression>& sizes_and_strides,
bool reverse = false) {
absl::c_sort(sizes_and_strides, [&](const SizeAndStrideExpression& sas1,
const SizeAndStrideExpression& sas2) {
int64_t stride1 = llvm::cast<AffineConstantExpr>(sas1.stride).getValue();
int64_t stride2 = llvm::cast<AffineConstantExpr>(sas2.stride).getValue();
if (reverse) {
return stride1 > stride2;
}
return stride1 < stride2;
});
}
std::optional<int64_t> TryGetSizeExpressionRangeSize(
AffineExpr size, absl::Span<Interval const> dimension_intervals) {
if (size.getKind() == AffineExprKind::Constant) {
return llvm::cast<AffineConstantExpr>(size).getValue();
}
CHECK(size.getKind() == AffineExprKind::DimId);
auto dim_position = llvm::dyn_cast<AffineDimExpr>(size).getPosition();
const Interval& interval = dimension_intervals.at(dim_position);
if (interval.lower != 0) {
VLOG(1) << "Attempted to combine strides but got dimension "
<< ToString(size) << " with lower bound " << interval.lower
<< " != 0";
return std::nullopt;
}
return interval.upper + 1;
}
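// Combines the strides of the summands of a destructured sum. All strides
// must be constants and all sizes constants or single dimensions; sorted by
// stride, each stride must equal the previous stride times the previous
// size's full extent. The result picks the stride of the smallest-stride
// summand whose tile size is not 1 (0 if every size is 1), expressed as
// nested IfNeqOne selects.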
std::optional<AffineExpr> CombineStrides(
std::vector<SizeAndStrideExpression> sizes_and_strides,
absl::Span<Interval const> dimension_intervals) {
CHECK(!sizes_and_strides.empty());
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
if (size_and_stride.stride.getKind() != AffineExprKind::Constant) {
VLOG(1) << "Attempted to combine non-constant stride: "
<< ToString(size_and_stride.stride);
return std::nullopt;
}
if (size_and_stride.size.getKind() != AffineExprKind::Constant &&
size_and_stride.size.getKind() != AffineExprKind::DimId) {
VLOG(1) << "Attempted to combine strides but got non-constant, "
"non-dimension size "
<< ToString(size_and_stride.size);
return std::nullopt;
}
}
SortByStride(sizes_and_strides);
for (auto [dim_id, size_and_stride] : llvm::enumerate(sizes_and_strides)) {
int64_t stride =
llvm::cast<AffineConstantExpr>(size_and_stride.stride).getValue();
if (dim_id > 0) {
const SizeAndStrideExpression& previous_size_and_stride =
sizes_and_strides[dim_id - 1];
std::optional<int64_t> previous_size_expression_range_size =
TryGetSizeExpressionRangeSize(previous_size_and_stride.size,
dimension_intervals);
if (!previous_size_expression_range_size.has_value()) {
return std::nullopt;
}
int64_t previous_stride =
llvm::cast<AffineConstantExpr>(previous_size_and_stride.stride)
.getValue();
if (*previous_size_expression_range_size * previous_stride != stride) {
VLOG(1) << "Attempted to combine strides but stride did not grow "
<< "exactly as expected: got "
<< *previous_size_expression_range_size << " * "
<< previous_stride << " != " << stride;
return std::nullopt;
}
}
}
MLIRContext* ctx = sizes_and_strides[0].stride.getContext();
AffineExpr nested_if = getAffineConstantExpr(0, ctx);
for (auto size_and_stride_it = sizes_and_strides.rbegin();
size_and_stride_it != sizes_and_strides.rend(); ++size_and_stride_it) {
AffineExpr size = size_and_stride_it->size;
AffineExpr stride = size_and_stride_it->stride;
std::optional<int64_t> size_expression_range_size =
TryGetSizeExpressionRangeSize(size, dimension_intervals);
if (!size_expression_range_size.has_value()) {
return std::nullopt;
}
nested_if = IfNeqOne(size, stride, nested_if, *size_expression_range_size);
}
return nested_if;
}
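// Builds one conjunction for a destructured sum sorted by decreasing stride:
// the component at `partial_dim_index` is left unconstrained (it may be
// captured partially), the `num_full_dims` components that follow must be
// captured fully, and every other component must have tile size 1. Returns
// nullopt if a full extent cannot be determined.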
std::optional<ConjointConstraints>
TryConstructSingleConjointConstraintForDestructuredSummation(
absl::Span<SizeAndStrideExpression const> sizes_and_strides,
absl::Span<Interval const> dimension_intervals, int64_t partial_dim_index,
int64_t num_full_dims) {
CHECK_LE(partial_dim_index + num_full_dims, sizes_and_strides.size());
ConjointConstraints constraints;
Interval one = Interval{1, 1};
int64_t running_size_index = 0;
while (running_size_index < partial_dim_index) {
constraints.push_back(
Constraint{sizes_and_strides[running_size_index].size, one});
++running_size_index;
}
++running_size_index;
while (running_size_index <= partial_dim_index + num_full_dims) {
AffineExpr size_expr = sizes_and_strides[running_size_index].size;
std::optional<int64_t> max_size =
TryGetSizeExpressionRangeSize(size_expr, dimension_intervals);
if (!max_size.has_value()) {
return std::nullopt;
}
constraints.push_back(Constraint{
size_expr, Interval{*max_size, *max_size}});
++running_size_index;
}
while (running_size_index < sizes_and_strides.size()) {
constraints.push_back(
Constraint{sizes_and_strides[running_size_index].size, one});
++running_size_index;
}
return constraints;
}
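// ORs together the conjunctions for every (partial dimension, number of fully
// captured dimensions) combination. If no conjunction could be constructed,
// the resulting expression is unsatisfiable.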
ConstraintExpression ConstructConstraintExpressionForDestructuredSummation(
std::vector<SizeAndStrideExpression> sizes_and_strides,
absl::Span<Interval const> dimension_intervals) {
SortByStride(sizes_and_strides, true);
ConstraintExpression result;
int64_t num_components = sizes_and_strides.size();
for (int64_t partial_dim_index = 0; partial_dim_index < num_components;
++partial_dim_index) {
for (int64_t num_full_dims = 0;
num_full_dims < num_components - partial_dim_index; ++num_full_dims) {
std::optional<ConjointConstraints> single_conjoint_constraint =
TryConstructSingleConjointConstraintForDestructuredSummation(
sizes_and_strides, dimension_intervals, partial_dim_index,
num_full_dims);
if (!single_conjoint_constraint.has_value()) {
continue;
}
result.Or(std::move(*single_conjoint_constraint));
}
}
if (result.IsAlwaysSatisfied()) {
return ConstraintExpression::GetUnsatisfiableConstraintExpression();
}
return result;
}
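// Combines per-summand sizes and strides into a single size (the product of
// the sizes) and a single stride, accumulating the per-summand constraints
// together with the constraints required for the combined stride to be valid.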
std::optional<SizeAndStrideExpression> CombineSizesAndStrides(
std::vector<SizeAndStrideExpression> sizes_and_strides,
absl::Span<Interval const> dimension_intervals) {
CHECK(!sizes_and_strides.empty());
if (VLOG_IS_ON(1)) {
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
LOG(INFO) << "CombineSizesAndStrides:";
LOG(INFO) << "size: " << ToString(size_and_stride.size)
<< " stride: " << ToString(size_and_stride.stride);
}
}
ConstraintExpression constraints;
for (SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
constraints = ConstraintExpression::And(
std::move(constraints), std::move(size_and_stride.constraints));
}
AffineExpr size = CombineSizes(sizes_and_strides);
std::optional<AffineExpr> stride =
CombineStrides(sizes_and_strides, dimension_intervals);
if (!stride.has_value()) {
return std::nullopt;
}
constraints = ConstraintExpression::And(
std::move(constraints),
ConstructConstraintExpressionForDestructuredSummation(
std::move(sizes_and_strides), dimension_intervals));
return SizeAndStrideExpression(size, *stride, std::move(constraints));
}
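// Extracts a (size, stride) pair from a strided indexing expression.
// Dimensions yield (d, 1), constants (1, 0), zero-based symbols
// (upper bound + 1, 1); multiplications scale the stride, and mod, floordiv
// and sums are delegated to the helpers above. Unsupported patterns return
// nullopt.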
std::optional<SizeAndStrideExpression> ExtractSizeAndStride(
AffineExpr strided_indexing, absl::Span<Interval const> dimension_intervals,
absl::Span<Interval const> symbol_intervals) {
MLIRContext* ctx = strided_indexing.getContext();
switch (strided_indexing.getKind()) {
case AffineExprKind::DimId:
return SizeAndStrideExpression(strided_indexing,
getAffineConstantExpr(1, ctx));
case mlir::AffineExprKind::Mul: {
const auto mul = llvm::cast<mlir::AffineBinaryOpExpr>(strided_indexing);
AffineExpr lhs = mul.getLHS();
std::optional<SizeAndStrideExpression> maybe_size_and_stride =
ExtractSizeAndStride(lhs, dimension_intervals, symbol_intervals);
if (!maybe_size_and_stride.has_value()) {
return std::nullopt;
}
return SizeAndStrideExpression(
maybe_size_and_stride->size,
maybe_size_and_stride->stride * mul.getRHS());
}
case mlir::AffineExprKind::Mod: {
auto mod = llvm::cast<mlir::AffineBinaryOpExpr>(strided_indexing);
return ExtractSizeAndStrideFromMod(mod.getLHS(), mod.getRHS());
}
case mlir::AffineExprKind::FloorDiv: {
auto floor_div = llvm::cast<mlir::AffineBinaryOpExpr>(strided_indexing);
return ExtractSizeAndStrideFromFloorDiv(floor_div.getLHS(),
floor_div.getRHS());
}
case mlir::AffineExprKind::Constant:
return SizeAndStrideExpression(getAffineConstantExpr(1, ctx),
getAffineConstantExpr(0, ctx));
case mlir::AffineExprKind::SymbolId: {
auto symbol = llvm::cast<AffineSymbolExpr>(strided_indexing);
const Interval& symbol_interval = symbol_intervals[symbol.getPosition()];
if (symbol_interval.lower != 0) {
return std::nullopt;
}
return SizeAndStrideExpression(
getAffineConstantExpr(symbol_interval.upper + 1, ctx),
getAffineConstantExpr(1, ctx));
}
case mlir::AffineExprKind::Add: {
std::optional<std::vector<SizeAndStrideExpression>>
maybe_sizes_and_strides =
ExtractSizesAndStridesFromMultivariateSummation(
strided_indexing, dimension_intervals, symbol_intervals);
if (!maybe_sizes_and_strides.has_value()) {
return std::nullopt;
}
return CombineSizesAndStrides(std::move(*maybe_sizes_and_strides),
dimension_intervals);
}
case mlir::AffineExprKind::CeilDiv:
break;
};
LOG(FATAL) << "unreachable";
}
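// Simplifies `expr` by wrapping it into a single-result indexing map with the
// same domain and constraints as `reference` and running
// IndexingMap::Simplify.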
AffineExpr SimplifyAffineExpr(const AffineExpr& expr,
const IndexingMap& reference) {
AffineMap tmp_affine_map =
AffineMap::get(reference.GetDimVars().size(),
reference.GetSymbolCount(),
{expr},
reference.GetMLIRContext());
IndexingMap tmp_indexing_map(
std::move(tmp_affine_map),
reference.GetDimVars(),
reference.GetRangeVars(),
reference.GetRTVars(),
reference.GetConstraints());
tmp_indexing_map.Simplify();
CHECK_EQ(tmp_indexing_map.GetAffineMap().getResults().size(), 1);
return tmp_indexing_map.GetAffineMap().getResults().back();
}
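// Intersects two conjunctions: constraints on the same expression have their
// intervals intersected. Returns nullopt if any intersection becomes
// infeasible; an empty conjunction acts as the identity.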
std::optional<ConjointConstraints> TryIntersectConjointConstraints(
ConjointConstraints conjunction_1,
const ConjointConstraints& conjunction_2) {
if (conjunction_1.empty()) {
return conjunction_2;
}
if (conjunction_2.empty()) {
return std::move(conjunction_1);
}
ConjointConstraints result = std::move(conjunction_1);
for (const auto& constraint : conjunction_2) {
Constraint* result_it =
llvm::find_if(result, [&](const Constraint& result_constraint) {
return result_constraint.expr == constraint.expr;
});
const auto& [expr, interval] = constraint;
if (result_it != result.end()) {
auto& [result_expr, result_interval] = *result_it;
result_interval = result_interval.Intersect(interval);
if (!result_interval.IsFeasible()) {
VLOG(1) << "Got two incompatible intervals for expression "
<< ToString(expr);
return std::nullopt;
}
} else {
result.push_back(Constraint{expr, interval});
}
}
return result;
}
}
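// Conjunction of two constraint expressions kept in disjunctive normal form:
// every conjunction of `first` is intersected with every conjunction of
// `second`, dropping pairs that become unsatisfiable. The result is
// unsatisfiable if no pair survives.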
ConstraintExpression ConstraintExpression::And(
ConstraintExpression first, ConstraintExpression second) {
if (!first.is_satisfiable_ || !second.is_satisfiable_) {
return ConstraintExpression::GetUnsatisfiableConstraintExpression();
}
if (first.IsAlwaysSatisfied()) {
return second;
}
if (second.IsAlwaysSatisfied()) {
return first;
}
ConstraintExpression result;
for (ConjointConstraints& conjunction_1 :
first.disjoint_conjoint_constraints_) {
for (ConjointConstraints& conjunction_2 :
second.disjoint_conjoint_constraints_) {
std::optional<ConjointConstraints> maybe_conjunction =
TryIntersectConjointConstraints(conjunction_1, conjunction_2);
if (maybe_conjunction.has_value()) {
result.disjoint_conjoint_constraints_.push_back(
std::move(*maybe_conjunction));
}
}
}
result.is_satisfiable_ = !result.disjoint_conjoint_constraints_.empty();
return result;
}
ConstraintExpression ConstraintExpression::Or(
ConstraintExpression first, ConstraintExpression second) {
if (!first.is_satisfiable_) {
return second;
}
if (!second.is_satisfiable_) {
return first;
}
absl::c_copy(second.disjoint_conjoint_constraints_,
std::back_inserter(first.disjoint_conjoint_constraints_));
return first;
}
void ConstraintExpression::Or(ConjointConstraints conjunction) {
if (conjunction.empty()) {
return;
}
disjoint_conjoint_constraints_.push_back(std::move(conjunction));
is_satisfiable_ = true;
}
void ConstraintExpression::And(ConjointConstraints conjunction) {
if (!is_satisfiable_ || conjunction.empty()) {
return;
}
if (disjoint_conjoint_constraints_.empty()) {
disjoint_conjoint_constraints_.push_back(std::move(conjunction));
return;
}
llvm::SmallVector<ConjointConstraints, 2> new_constraints;
new_constraints.reserve(disjoint_conjoint_constraints_.size());
for (ConjointConstraints& conjunction_2 : disjoint_conjoint_constraints_) {
std::optional<ConjointConstraints> maybe_result =
TryIntersectConjointConstraints(std::move(conjunction_2), conjunction);
if (maybe_result.has_value()) {
new_constraints.push_back(std::move(*maybe_result));
}
}
is_satisfiable_ = !new_constraints.empty();
disjoint_conjoint_constraints_ = std::move(new_constraints);
}
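// Evaluates the disjunction against concrete parameter values: returns true
// if every constraint of at least one conjunction maps the parameters into
// its interval.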
bool ConstraintExpression::IsSatisfiedBy(
absl::Span<const int64_t> parameters) const {
if (IsAlwaysSatisfied()) {
return true;
}
if (!is_satisfiable_) {
return false;
}
bool constraints_are_satisfied = false;
for (const ConstraintExpression::ConjointConstraints& conjunction :
disjoint_conjoint_constraints_) {
bool conjunction_is_satisfied = true;
for (const auto& [constrained_expr, interval] : conjunction) {
int64_t constrained_value =
EvaluateAffineExpr(constrained_expr, parameters);
if (constrained_value < interval.lower ||
constrained_value > interval.upper) {
conjunction_is_satisfied = false;
break;
}
}
constraints_are_satisfied |= conjunction_is_satisfied;
}
return constraints_are_satisfied;
}
std::string ConstraintExpression::ToString() const {
std::stringstream ss;
Print(ss);
return ss.str();
}
void ConstraintExpression::Print(std::ostream& out) const {
if (IsAlwaysSatisfied()) {
out << "always satisfied";
} else if (is_satisfiable()) {
std::vector<std::string> conjunction_strings;
conjunction_strings.reserve(disjoint_conjoint_constraints_.size());
for (const auto& disjunction : disjoint_conjoint_constraints_) {
std::vector<std::string> constraint_strings;
constraint_strings.reserve(disjunction.size());
for (const auto& [expr, interval] : disjunction) {
constraint_strings.push_back(absl::StrCat(xla::gpu::ToString(expr),
" in ", interval.ToString()));
}
std::sort(constraint_strings.begin(), constraint_strings.end());
conjunction_strings.push_back(absl::StrJoin(constraint_strings, " && "));
}
std::sort(conjunction_strings.begin(), conjunction_strings.end());
out << absl::StrJoin(conjunction_strings, " || ");
} else {
out << "unsatisfiable";
}
out << "\n";
}
namespace {
bool IsConstraintAlwaysSatisfied(mlir::AffineExpr expr, Interval interval) {
if (AffineConstantExpr constant = mlir::dyn_cast<AffineConstantExpr>(expr)) {
return interval.Contains(constant.getValue());
}
return false;
}
bool IsConstraintUnsatisfiable(mlir::AffineExpr expr, Interval interval) {
if (!interval.IsFeasible()) {
return true;
}
if (AffineConstantExpr constant = mlir::dyn_cast<AffineConstantExpr>(expr)) {
return !interval.Contains(constant.getValue());
}
return false;
}
struct Unsatisfiable {};
struct AlwaysSatisfied {};
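// Drops constraints that are trivially satisfied, reports Unsatisfiable or
// AlwaysSatisfied when the whole conjunction collapses, and sorts the
// remaining constraints into a canonical order.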
std::variant<Unsatisfiable, AlwaysSatisfied, ConjointConstraints>
SimplifyConjointConstraints(const ConjointConstraints& conjunction) {
ConjointConstraints result;
for (const auto& [expr, interval] : conjunction) {
if (IsConstraintAlwaysSatisfied(expr, interval)) {
continue;
}
if (IsConstraintUnsatisfiable(expr, interval)) {
return Unsatisfiable{};
}
result.push_back(Constraint{expr, interval});
}
if (result.empty()) {
return AlwaysSatisfied{};
}
auto comp = [](const Constraint& a, const Constraint& b) {
if (a.expr != b.expr) {
CHECK_EQ(a.expr.getContext(), b.expr.getContext())
<< "AffineExpr should be from the same MLIRContext.";
return a.expr.getImpl() < b.expr.getImpl();
}
if (a.interval.lower != b.interval.lower) {
return a.interval.lower < b.interval.lower;
}
return a.interval.upper < b.interval.upper;
};
std::sort(result.begin(), result.end(), comp);
return result;
}
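// Simplifies a constraint expression by simplifying each conjunction,
// removing unsatisfiable ones, short-circuiting to "always satisfied" when a
// conjunction becomes empty, and deduplicating the survivors.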
ConstraintExpression SimplifyConstraintExpression(
    ConstraintExpression constraint_expression) {
if (!constraint_expression.is_satisfiable() ||
constraint_expression.IsAlwaysSatisfied()) {
return constraint_expression;
}
SmallVector<ConjointConstraints, 2> simplified_disjoint_conjoint_constraints;
for (const auto& conjunction :
constraint_expression.DisjointConjointConstraints()) {
auto simplified_conjunction = SimplifyConjointConstraints(conjunction);
if (std::holds_alternative<Unsatisfiable>(simplified_conjunction)) {
continue;
}
if (std::holds_alternative<AlwaysSatisfied>(simplified_conjunction)) {
return ConstraintExpression();
}
simplified_disjoint_conjoint_constraints.push_back(
std::get<ConjointConstraints>(std::move(simplified_conjunction)));
}
  absl::flat_hash_set<ConjointConstraints> unique_conjunctions;
  // Set insertion already deduplicates, and iterating by non-const reference
  // lets the std::move below actually move instead of copy.
  for (auto& conjunction : simplified_disjoint_conjoint_constraints) {
    unique_conjunctions.insert(std::move(conjunction));
  }
auto result = ConstraintExpression::GetUnsatisfiableConstraintExpression();
for (auto& conjoint_constraints : unique_conjunctions) {
result.Or(std::move(conjoint_constraints));
}
return result;
}
}
void ConstraintExpression::Simplify() {
*this = SimplifyConstraintExpression(std::move(*this));
}
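// Attempts to derive a symbolic tile from `indexing_map`: offsets are
// obtained by substituting zero for all indices, and each result is
// decomposed into size and stride expressions. Bails out if the map carries
// pre-existing constraints or a result cannot be decomposed. Negative
// constant strides are normalized by adjusting the offset.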
std::optional<SymbolicTile> SymbolicTile::FromIndexingMap(
IndexingMap indexing_map) {
VLOG(1) << "SymbolicTile::FromIndexingMap: " << indexing_map;
bool did_simplify = indexing_map.Simplify();
VLOG(1) << "did_simplify: " << did_simplify;
if (indexing_map.GetConstraintsCount() != 0) {
VLOG(1) << "Deriving symbolic tile from indexing map with pre-existing "
<< "constraints might produce spurious constraints. Bailing out. "
<< indexing_map;
return std::nullopt;
}
AffineMap input_affine_map = indexing_map.GetAffineMap();
MLIRContext* mlir_context = input_affine_map.getContext();
std::vector<AffineExpr> offset_expressions =
SubstituteAllIndicesAndRangeVarSymbolsWithSameValue(
input_affine_map, getAffineConstantExpr(0, mlir_context),
indexing_map.GetRangeVarsCount())
.getResults();
for (AffineExpr& expr : offset_expressions) {
expr = SimplifyAffineExpr(expr, indexing_map);
}
ConstraintExpression constraints;
std::vector<AffineExpr> size_expressions;
std::vector<AffineExpr> stride_expressions;
size_expressions.reserve(offset_expressions.size());
stride_expressions.reserve(offset_expressions.size());
for (auto [composite_indexing, offset] :
llvm::zip(input_affine_map.getResults(), offset_expressions)) {
std::optional<SizeAndStrideExpression> maybe_size_and_stride =
ExtractSizeAndStride(SimplifyAffineExpr(composite_indexing - offset,
indexing_map),
indexing_map.GetDimensionBounds(),
indexing_map.GetSymbolBounds());
if (!maybe_size_and_stride.has_value()) {
VLOG(1) << "No size and stride extracted";
return std::nullopt;
}
size_expressions.push_back(maybe_size_and_stride->size);
stride_expressions.push_back(maybe_size_and_stride->stride);
constraints = ConstraintExpression::And(
std::move(constraints), std::move(maybe_size_and_stride->constraints));
}
for (auto [offset, size, stride] :
llvm::zip(offset_expressions, size_expressions, stride_expressions)) {
auto constant = llvm::dyn_cast<AffineConstantExpr>(stride);
if (constant && constant.getValue() < 0) {
offset = offset + size * stride - stride;
stride = -stride;
} else if (!constant) {
VLOG(1) << "Unexpected non-constant stride expression: "
<< xla::gpu::ToString(stride);
}
}
std::vector<IndexingMap::Variable> tile_sizes = indexing_map.GetDimVars();
for (IndexingMap::Variable& tile_size : tile_sizes) {
tile_size.bounds.lower += 1;
tile_size.bounds.upper += 1;
}
std::vector<AffineExpr> results;
absl::c_move(std::move(offset_expressions), std::back_inserter(results));
absl::c_move(std::move(size_expressions), std::back_inserter(results));
absl::c_move(std::move(stride_expressions), std::back_inserter(results));
AffineMap tile_affine_map =
AffineMap::get(tile_sizes.size(),
indexing_map.GetSymbolCount(),
results,
indexing_map.GetMLIRContext());
IndexingMap tile_map(
std::move(tile_affine_map),
std::move(tile_sizes),
indexing_map.GetRangeVars(),
indexing_map.GetRTVars());
tile_map.RemoveUnusedSymbols();
CHECK_EQ(tile_map.GetRangeVarsCount(), 0);
VLOG(1) << "tile_map: " << tile_map;
constraints.Simplify();
return SymbolicTile(std::move(tile_map), std::move(constraints));
}
std::string SymbolicTile::ToString() const {
std::stringstream ss;
Print(ss);
return ss.str();
}
void SymbolicTile::Print(std::ostream& out) const {
out << "Symbolic tile with \n";
out << "\toffset_map: " << offset_map();
out << "\n\tsize_map: " << size_map();
out << "\n\tstride_map: " << stride_map();
const std::vector<IndexingMap::Variable>& rt_vars = tile_map_.GetRTVars();
if (!rt_vars.empty()) {
out << "\n\trt_vars: ";
for (const auto& [index, rt_var] : llvm::enumerate(rt_vars)) {
out << 's' << index << " in " << rt_var.bounds << ", ";
}
}
if (!constraints_.IsAlwaysSatisfied()) {
out << "\n\tconstraints: ";
constraints_.Print(out);
}
}
namespace {
constexpr int kNumComponentsPerTiledDimension = 3;
}
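// The results of `tile_map_` concatenate the offset, size and stride
// expressions for each dimension; the accessors below slice out the
// corresponding third. The size and stride maps drop the runtime-variable
// symbols.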
AffineMap SymbolicTile::offset_map() const {
int64_t num_results = tile_map_.GetAffineMap().getResults().size();
CHECK_EQ(num_results % kNumComponentsPerTiledDimension, 0);
int64_t component_size = num_results / kNumComponentsPerTiledDimension;
return tile_map_.GetAffineMap().getSliceMap(0, component_size);
}
AffineMap SymbolicTile::size_map() const {
AffineMap affine_map = tile_map_.GetAffineMap();
int64_t num_results = affine_map.getResults().size();
CHECK_EQ(num_results % kNumComponentsPerTiledDimension, 0);
int64_t component_size = num_results / kNumComponentsPerTiledDimension;
return AffineMap::get(
affine_map.getNumDims(),
affine_map.getNumSymbols() - tile_map_.GetRTVarsCount(),
affine_map.getResults().slice(component_size, component_size),
affine_map.getContext());
}
AffineMap SymbolicTile::stride_map() const {
AffineMap affine_map = tile_map_.GetAffineMap();
int64_t num_results = affine_map.getResults().size();
CHECK_EQ(num_results % kNumComponentsPerTiledDimension, 0);
int64_t component_size = num_results / kNumComponentsPerTiledDimension;
return AffineMap::get(
affine_map.getNumDims(),
affine_map.getNumSymbols() - tile_map_.GetRTVarsCount(),
affine_map.getResults().slice(2 * component_size, component_size),
affine_map.getContext());
}
}
} | #include "xla/service/gpu/model/symbolic_tile.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::llvm::SmallVector;
using ::testing::ElementsAre;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Optional;
using ::testing::SizeIs;
using Constraint = ConstraintExpression::Constraint;
using ConjointConstraints = ConstraintExpression::ConjointConstraints;
MATCHER_P(MatchSymbolicTileString, symbolic_tile_string, "") {
return ExplainMatchResult(
true, ApproximateMatch(symbolic_tile_string, arg.ToString()),
result_listener);
}
MATCHER_P(MatchConstraintExpressionString, constraint_expression_string, "") {
return ExplainMatchResult(
true, ApproximateMatch(constraint_expression_string, arg.ToString()),
result_listener);
}
using SymbolicTileTest = IndexingTestBase;
TEST_F(SymbolicTileTest, CanPropagateTileFromDotOutputToInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[11, 17, 19] parameter(0)
p1 = f32[11, 19, 23] parameter(1)
ROOT dot = f32[11, 17, 23] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, 0, 0)
size_map: (d0, d1, d2) -> (d0, d1, 19)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughTrivialReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[11, 17, 19] parameter(0)
ROOT reshape = f32[1, 11, 17, 19] reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2, d3) -> (0, 0, 0)
size_map: (d0, d1, d2, d3) -> (d1, d2, d3)
stride_map: (d0, d1, d2, d3) -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest,
CanPropagateTileThroughNonTrivialMergeReshapeFromOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT reshape = f32[48,4]{1,0} reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (0, 0, 0, 0)
size_map: (d0, d1) -> (1, (d0 + 5) floordiv 6, d0 - ((d0 - 1) floordiv 6) * 6, d1)
stride_map: (d0, d1) -> (0, 1, 1, 1)
constraints:
6 mod d0 in [0, 0] || d0 mod 6 in [0, 0]
)")));
}
TEST_F(SymbolicTileTest,
CanPropagateTileThroughNonTrivialSplitReshapeFromOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[192,4]{1,0} parameter(0)
ROOT reshape = f32[4,8,6,4]{3,2,1,0} reshape(p0)
}
)"));
std::optional<SymbolicTile> symbolic_tile =
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin());
EXPECT_THAT(symbolic_tile, Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2, d3) -> (0, 0)
size_map: (d0, d1, d2, d3) -> ((d0 * d1) * d2, d3)
stride_map: (d0, d1, d2, d3) ->
(((-d2 + 7) floordiv 6) * (((-d1 + 9) floordiv 8) *
((-((-d0 + 5) floordiv 4) + 1) * 48) +
(-((-d1 + 9) floordiv 8) + 1) * 6) + -((-d2 + 7) floordiv 6) + 1, 1)
constraints: d0 in [1, 1] && d1 in [1, 1] ||
d0 in [1, 1] && d2 in [1, 1] ||
d0 in [1, 1] && d2 in [6, 6] ||
d1 in [1, 1] && d2 in [1, 1] ||
d1 in [8, 8] && d2 in [1, 1] ||
d1 in [8, 8] && d2 in [6, 6]
)")));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {4, 8, 6, 4}),
ElementsAre(1, 1));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {1, 1, 6, 4}),
ElementsAre(1, 1));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {1, 8, 1, 4}),
ElementsAre(6, 1));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {2, 1, 1, 4}),
ElementsAre(48, 1));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {2, 8, 1, 4}),
ElementsAre(6, 1));
EXPECT_THAT(EvaluateAffineMap(symbolic_tile->stride_map(), {1, 1, 1, 4}),
ElementsAre(0, 1));
}
TEST_F(SymbolicTileTest, FailsToPropagateTileThroughNonTrivialReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[12, 4, 19] parameter(0)
ROOT reshape = f32[4, 12, 19] reshape(p0)
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughElementwiseOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[150] parameter(0)
p1 = f32[150] parameter(1)
ROOT add = f32[150] add(p0, p1)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0) -> (0)
size_map: (d0) -> (d0)
stride_map: (d0) -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromBroadcastOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[150] parameter(0)
ROOT broadcast = f32[157,150] broadcast(p0), dimensions={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (0)
size_map: (d0, d1) -> (d1)
stride_map: (d0, d1) -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromReduceOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
p0 = f32[125,150] parameter(0)
c0 = f32[] constant(-inf)
ROOT reduce = f32[150] reduce(p0, c0), dimensions={0}, to_apply=max
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0) -> (0, 0)
size_map: (d0) -> (125, d0)
stride_map: (d0) -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[179] parameter(0)
ROOT reverse = f32[179] reverse(p0), dimensions={0}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0) -> (-d0 + 179)
size_map: (d0) -> (d0)
stride_map: (d0) -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromSliceOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[120,142] parameter(0)
ROOT slice = f32[10,21] slice(p0), slice={[40:60:2], [20:104:4]}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (40, 20)
size_map: (d0, d1) -> (d0, d1)
stride_map: (d0, d1) -> (2, 4)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[21,10] parameter(0)
ROOT transpose = f32[10,21] transpose(p0), dimensions={1,0}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (0, 0)
size_map: (d0, d1) -> (d1, d0)
stride_map: (d0, d1) -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughConcatenate) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2,5,7] parameter(0)
p1 = f32[2,11,7] parameter(1)
p2 = f32[2,17,7] parameter(2)
ROOT concat = f32[2,33,7] concatenate(p0, p1, p2), dimensions={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, 0, 0)
size_map: (d0, d1, d2) -> (d0, d1, d2)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, -5, 0)
size_map: (d0, d1, d2) -> (d0, d1, d2)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[2].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, -16, 0)
size_map: (d0, d1, d2) -> (d0, d1, d2)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughPadOpWithoutInteriorPadding) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
input = f32[4, 4] parameter(0)
padding_value = f32[] parameter(1)
ROOT pad = f32[8,8] pad(input, padding_value), padding=2_2_0x1_3_0
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (-2, -1)
size_map: (d0, d1) -> (d0, d1)
stride_map: (d0, d1) -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughDynamicSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 4);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2)[s0, s1] -> (s0, 0, s1)
size_map: (d0, d1, d2) -> (1, d1, d2)
stride_map: (d0, d1, d2) -> (0, 1, 1)
rt_vars:
s0 in [0, 1],
s1 in [0, 226],
)")));
for (int i = 1; i <= 3; i++) {
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[i].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> ()
size_map: (d0, d1, d2) -> ()
stride_map: (d0, d1, d2) -> ()
)")));
}
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughDynamicUpdateSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[20,30] parameter(0)
%upd = s32[5,10] parameter(1)
%of1 = s32[] parameter(2)
%of2 = s32[] parameter(3)
ROOT %dus = s32[20,30] dynamic-update-slice(
s32[20,30] %src, s32[5,10] %upd, s32[] %of1, s32[] %of2)
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 4);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (0, 0)
size_map: (d0, d1) -> (d0, d1)
stride_map: (d0, d1) -> (1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1)[s0, s1] -> (-s0, -s1)
size_map: (d0, d1) -> (d0, d1)
stride_map: (d0, d1) -> (1, 1)
rt_vars:
s0 in [0, 15],
s1 in [0, 20],
)")));
for (int i = 2; i <= 3; i++) {
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[i].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> ()
size_map: (d0, d1) -> ()
stride_map: (d0, d1) -> ()
)")));
}
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughGather) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY main {
operand = f32[33,76,70] parameter(0)
indices = s32[1806,2] parameter(1)
ROOT r = f32[1806,7,8,4] gather(operand, indices), offset_dims={1,2,3},
collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 2);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2, d3)[s0, s1] -> (s0, s1, 0)
size_map: (d0, d1, d2, d3) -> (d1, d2, d3)
stride_map: (d0, d1, d2, d3) -> (1, 1, 1)
rt_vars:
s0 in [0, 26],
s1 in [0, 68],
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2, d3) -> (0, 0)
size_map: (d0, d1, d2, d3) -> (d0, 2)
stride_map: (d0, d1, d2, d3) -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughSplitReshapeOfReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reverse = f32[1,8,6,4]{3,2,1,0} reverse(p0), dimensions={1,2}
ROOT reshape = f32[48,4]{1,0} reshape(reverse)
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[48,4]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) ->
(0, -((d0 + 5) floordiv 6) + 8, -(d0 - ((d0 - 1) floordiv 6) * 6) + 6, 0)
size_map: (d0, d1) ->
(1, (d0 + 5) floordiv 6, d0 - ((d0 - 1) floordiv 6) * 6, d1)
stride_map: (d0, d1) -> (0, 1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughSplitReductionOfSplittedAxis) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
computation {
p0 = f32[18] parameter(0)
bitcast = f32[9,2] bitcast(p0)
c0 = f32[] constant(0)
reduce_0 = f32[9] reduce(bitcast, c0), dimensions={1}, to_apply=add
ROOT reduce_1 = f32[] reduce(reduce_0, c0), dimensions={0}, to_apply=add
}
ENTRY e {
p0 = f32[18] parameter(0)
ROOT fusion = f32[] fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: () -> (0)
size_map: () -> (18)
stride_map: () -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughSummationOfSymbols) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("()[s0, s1] -> (s1 * 2 + s0)", &mlir_context_), {},
{2, 9});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: () -> (0)
size_map: () -> (18)
stride_map: () -> (1)
)")));
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reshape = f32[48,4]{1,0} reshape(p0)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughMisalignedSliceOfSplitReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reshape = f32[48,4]{1,0} reshape(p0)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[20:45:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshapeOnTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,6,8,4]{3,2,1,0} parameter(0)
transpose = f32[1,8,6,4]{3,2,1,0} transpose(p0), dimensions={0,2,1,3}
reshape = f32[48,4]{1,0} reshape(transpose)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,6,8,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshapeOfReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reverse = f32[1,8,6,4]{3,2,1,0} reverse(p0), dimensions={1,2}
reshape = f32[48,4]{1,0} reshape(reverse)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughReductionOfConcatenation) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(p0, p1)
}
computation {
p0 = f32[10,8]{1,0} parameter(0)
p1 = f32[20,8]{1,0} parameter(1)
concatenate = f32[30,8]{1,0} concatenate(p0, p1), dimensions={0}
neg_inf = f32[] constant(-inf)
ROOT reduce = f32[8] reduce(concatenate, neg_inf), dimensions={0},
to_apply=max_computation
}
ENTRY e {
p0 = f32[10,8]{1,0} parameter(0)
p1 = f32[20,8]{1,0} parameter(1)
ROOT fusion = f32[8] fusion(p0, p1), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest, CanCombineCompatibleConstraints) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT reshape = f32[48,32]{1,0} reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1) -> (0, 0, 0, 0, 0)
size_map: (d0, d1) -> (1, (d0 + 5) floordiv 6, d0 - ((d0 - 1) floordiv 6) * 6, (d1 + 7) floordiv 8, d1 - ((d1 - 1) floordiv 8) * 8)
stride_map: (d0, d1) -> (0, 1, 1, 1, 1)
constraints:
6 mod d0 in [0, 0] && 8 mod d1 in [0, 0] ||
6 mod d0 in [0, 0] && d1 mod 8 in [0, 0] ||
8 mod d1 in [0, 0] && d0 mod 6 in [0, 0] ||
d0 mod 6 in [0, 0] && d1 mod 8 in [0, 0]
)")));
}
TEST_F(SymbolicTileTest,
CanDeriveTileWhenPreexistingConstraintsCanBeSimplifiedAway) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2)[s0] -> (d0 * 2048 + d1, s0)",
&mlir_context_),
{4, 2048, 50304}, {50304});
indexing_map.AddConstraint(ParseAffineExpr("d0 * 2048 + d1", &mlir_context_),
Interval{0, 8191});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, 0)
size_map: (d0, d1, d2) -> (d0 * d1, 50304)
stride_map: (d0, d1, d2) -> (((-d1 + 2049) floordiv 2048) * ((-((-d0 + 5) floordiv 4) + 1) * 2048) + -((-d1 + 2049) floordiv 2048) + 1, 1)
constraints: d0 in [1, 1] || d1 in [1, 1] || d1 in [2048, 2048]
)")));
}
TEST_F(SymbolicTileTest, CanDeriveTileWhenTheIndexingMapHasSymbolsInASum) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2)[s0] -> (d0, d1, d2 * 128 + s0)",
&mlir_context_),
{4, 2048, 393}, {128});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, 0, 0)
size_map: (d0, d1, d2) -> (d0, d1, d2 * 128)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, ResultingConstraintsAreSimplifiedAway) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2)[s0] -> (d0, d1, d2 * 128 + s0)",
&mlir_context_),
{4, 2048, 393}, {128});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: (d0, d1, d2) -> (0, 0, 0)
size_map: (d0, d1, d2) -> (d0, d1, d2 * 128)
stride_map: (d0, d1, d2) -> (1, 1, 1)
)")));
}
class ConstraintExpressionTest : public IndexingTestBase {
public:
using ConstraintVector = std::vector<std::pair<std::string, Interval>>;
ConstraintExpression::Constraint GetConstraint(const std::string& string_expr,
int64_t lower, int64_t upper) {
return {ParseAffineExpr(string_expr, &mlir_context_),
Interval{lower, upper}};
}
ConjointConstraints GetConjointConstraints(
ConstraintVector&& expr_and_interval_pairs) {
ConjointConstraints conjunction;
for (auto& [string_expr, interval] : expr_and_interval_pairs) {
conjunction.push_back(
{ParseAffineExpr(string_expr, &mlir_context_), interval});
}
return conjunction;
}
};
TEST_F(ConstraintExpressionTest,
DefaultConstructedConstraintExpressionIsAlwaysSatisfied) {
EXPECT_TRUE(ConstraintExpression().IsAlwaysSatisfied());
}
TEST_F(ConstraintExpressionTest, PrettyPrintingTest) {
EXPECT_THAT(ConstraintExpression(),
MatchConstraintExpressionString("always satisfied"));
EXPECT_THAT(ConstraintExpression::GetUnsatisfiableConstraintExpression(),
MatchConstraintExpressionString("unsatisfiable"));
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_THAT(constraints, MatchConstraintExpressionString(
"d0 in [0, 5] && d1 in [0, 5] || d2 in [0, 5]"));
}
TEST_F(ConstraintExpressionTest,
ConjunctionOfConstraintsOnTheSameExpressionAreIntersected) {
ConstraintExpression constraints;
constraints.And(GetConjointConstraints({{"d0", Interval{0, 5}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("d0 in [0, 5]"));
constraints.And(GetConjointConstraints({{"d0", Interval{3, 6}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("d0 in [3, 5]"));
constraints.And(GetConjointConstraints({{"d0", Interval{7, 8}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(ConstraintExpressionTest,
UnsatisfiableConstraintExpressionHoldsNoConstraint) {
ConstraintExpression unsatisfiable_constraint =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
EXPECT_FALSE(unsatisfiable_constraint.is_satisfiable());
EXPECT_THAT(unsatisfiable_constraint.DisjointConjointConstraints(),
IsEmpty());
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformConjunctionOfConstraintExpressionWithConjointConstraints) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.And(std::move(conjunction_1));
constraints.And(std::move(conjunction_2));
EXPECT_TRUE(constraints.is_satisfiable());
const auto& conjunctions = constraints.DisjointConjointConstraints();
EXPECT_THAT(conjunctions, SizeIs(1));
EXPECT_THAT(conjunctions.front(), SizeIs(3));
}
TEST_F(
ConstraintExpressionTest,
CorrectlyEliminatesConjunctionFromDisjunctionWhenItBecomesUnsatisfiable) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d1", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_THAT(constraints,
MatchConstraintExpressionString("d0 in [0, 5] || d1 in [0, 5]"));
ConjointConstraints conjunction_3 =
GetConjointConstraints({{"d0", Interval{6, 6}}});
constraints.And(std::move(conjunction_3));
EXPECT_THAT(constraints,
MatchConstraintExpressionString("d0 in [6, 6] && d1 in [0, 5]"));
ConjointConstraints conjunction_4 =
GetConjointConstraints({{"d0", Interval{7, 7}}});
constraints.And(std::move(conjunction_4));
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformDisjunctionOfConstraintExpressionWithConjointConstraints) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_TRUE(constraints.is_satisfiable());
const auto& conjunctions = constraints.DisjointConjointConstraints();
EXPECT_THAT(conjunctions, SizeIs(2));
EXPECT_THAT(conjunctions.front(), SizeIs(2));
EXPECT_THAT(conjunctions.back(), SizeIs(1));
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformConjunctionOfConstraintExpressionWithConstraintExpression) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d1", Interval{0, 5}}});
ConstraintExpression constraints_1;
constraints_1.Or(std::move(conjunction_1));
constraints_1.Or(std::move(conjunction_2));
ConjointConstraints conjunction_3 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConjointConstraints conjunction_4 =
GetConjointConstraints({{"d3", Interval{0, 5}}});
ConjointConstraints conjunction_5 =
GetConjointConstraints({{"d4", Interval{0, 5}}});
ConstraintExpression constraints_2;
constraints_2.Or(std::move(conjunction_3));
constraints_2.Or(std::move(conjunction_4));
constraints_2.Or(std::move(conjunction_5));
ConstraintExpression result_constraint_expression =
ConstraintExpression::And(std::move(constraints_1), constraints_2);
EXPECT_TRUE(result_constraint_expression.is_satisfiable());
EXPECT_THAT(result_constraint_expression.DisjointConjointConstraints(),
SizeIs(6));
for (const ConjointConstraints& conjunction :
result_constraint_expression.DisjointConjointConstraints()) {
EXPECT_THAT(conjunction, SizeIs(2));
}
ConstraintExpression empty_constraints;
EXPECT_THAT(ConstraintExpression::And(empty_constraints, constraints_2)
.DisjointConjointConstraints(),
SizeIs(3));
EXPECT_THAT(ConstraintExpression::And(std::move(constraints_2),
std::move(empty_constraints))
.DisjointConjointConstraints(),
SizeIs(3));
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformDisjunctionOfConstraintExpressionWithConstraintExpression) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d1", Interval{0, 5}}});
ConstraintExpression constraints_1;
constraints_1.Or(std::move(conjunction_1));
constraints_1.Or(std::move(conjunction_2));
ConjointConstraints conjunction_3 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConjointConstraints conjunction_4 =
GetConjointConstraints({{"d3", Interval{0, 5}}});
ConjointConstraints conjunction_5 =
GetConjointConstraints({{"d4", Interval{0, 5}}});
ConstraintExpression constraints_2;
constraints_2.Or(std::move(conjunction_3));
constraints_2.Or(std::move(conjunction_4));
constraints_2.Or(std::move(conjunction_5));
ConstraintExpression result_constraint_expression = ConstraintExpression::Or(
std::move(constraints_1), std::move(constraints_2));
EXPECT_TRUE(result_constraint_expression.is_satisfiable());
EXPECT_THAT(result_constraint_expression.DisjointConjointConstraints(),
SizeIs(5));
for (const ConjointConstraints& conjunction :
result_constraint_expression.DisjointConjointConstraints()) {
EXPECT_THAT(conjunction, SizeIs(1));
}
}
TEST_F(
ConstraintExpressionTest,
ConjunctionInvolvingUnsatisfiableConstraintExpressionIsUnsatisfiable) {
ConstraintExpression constraints =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
constraints.And(std::move(conjunction_1));
EXPECT_FALSE(constraints.is_satisfiable());
EXPECT_THAT(constraints.DisjointConjointConstraints(), IsEmpty());
}
TEST_F(
ConstraintExpressionTest,
DisjunctionInvolvingUnsatisfiableConstraintExpressionIsSatisfiable) {
ConstraintExpression constraints =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
constraints.Or(conjunction_1);
EXPECT_TRUE(constraints.is_satisfiable());
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(1));
ConstraintExpression constraints_1 =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
ConstraintExpression constraints_2;
constraints_2.Or(std::move(conjunction_1));
ConstraintExpression result_constraint_expression = ConstraintExpression::Or(
std::move(constraints_1), std::move(constraints_2));
EXPECT_TRUE(result_constraint_expression.is_satisfiable());
EXPECT_THAT(result_constraint_expression.DisjointConjointConstraints(),
SizeIs(1));
}
TEST_F(
ConstraintExpressionTest,
DisjunctionInvolvingTwoUnsatisfiableConstraintExpressionsIsUnsatisfiable) {
ConstraintExpression constraints_1 =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
ConstraintExpression constraints_2 =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
EXPECT_FALSE(
ConstraintExpression::And(constraints_1, constraints_2).is_satisfiable());
}
TEST_F(ConstraintExpressionTest,
       CanSimplifyAlwaysSatisfiedConstraintExpression) {
ConstraintExpression constraints;
constraints.Or(GetConjointConstraints({
{"d0", Interval{0, 1}},
}));
constraints.Or(GetConjointConstraints({
{"25", Interval{0, 100}},
{"1", Interval{1, 1}},
}));
constraints.Or(GetConjointConstraints({
{"d0", Interval{0, -1}},
}));
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString("always satisfied"));
}
TEST_F(ConstraintExpressionTest, CanSimplifyUnsatisfiableConstraintExpression) {
ConstraintExpression constraints;
constraints.Or(GetConjointConstraints({
{"d0", Interval{0, -1}},
}));
constraints.Or(GetConjointConstraints({
{"1", Interval{2, 3}},
}));
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(ConstraintExpressionTest,
CanSimplifyAwayAlwaysSatisfiedPartOfConjunction) {
ConstraintExpression constraints;
constraints.Or(GetConjointConstraints({
{"d0", Interval{0, 1}},
{"1", Interval{1, 1}},
{"d1", Interval{0, 1}},
{"2", Interval{2, 3}},
}));
constraints.Simplify();
EXPECT_THAT(constraints,
MatchConstraintExpressionString("d0 in [0, 1] && d1 in [0, 1]"));
}
TEST_F(ConstraintExpressionTest,
CanSimplifyAwayUnsatisfiablePartOfDisjunction) {
ConstraintExpression constraints;
constraints.Or(GetConjointConstraints({
{"d0", Interval{0, 1}},
}));
constraints.Or(GetConjointConstraints({
{"d1", Interval{0, 1}},
{"1", Interval{0, 0}},
{"d2", Interval{0, 1}},
}));
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString("d0 in [0, 1]"));
}
TEST_F(ConstraintExpressionTest, SimplifyKeepsAlwaysSatisfiedUnchanged) {
ConstraintExpression constraints;
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString("always satisfied"));
}
TEST_F(ConstraintExpressionTest, SimplifyKeepsUnsatisfiableUnchanged) {
auto constraints =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(ConstraintExpressionTest, SimplifyRemovesRedundantConstraints) {
Constraint c0 = GetConstraint("d0", 0, 0);
Constraint c1 = GetConstraint("d1", 1, 1);
ConjointConstraints conjunction_0{c0, c1};
ConjointConstraints conjunction_1{c1, c0};
ConjointConstraints conjunction_2{c0};
ConstraintExpression constraints;
constraints.Or(conjunction_0);
constraints.Or(conjunction_1);
constraints.Or(conjunction_2);
constraints.Or(conjunction_1);
constraints.Or(conjunction_0);
constraints.Simplify();
EXPECT_THAT(constraints, MatchConstraintExpressionString(
"d0 in [0, 0] || d0 in [0, 0] && d1 in [1, 1]"));
}
TEST_F(ConstraintExpressionTest, ConstraintSatisfactionIsEvaluatedCorrectly) {
Constraint c0 = GetConstraint("d0 mod 6", 0, 0);
Constraint c1 = GetConstraint("d1 mod 8", 0, 0);
Constraint c2 = GetConstraint("d0 mod 13", 0, 0);
ConjointConstraints conjunction_0{c0, c1};
ConjointConstraints conjunction_1{c1, c2};
ConstraintExpression constraints;
constraints.Or(conjunction_0);
constraints.Or(conjunction_1);
std::vector<int64_t> possible_tile_parameters({6, 8});
EXPECT_TRUE(constraints.IsSatisfiedBy(possible_tile_parameters));
std::vector<int64_t> other_possible_tile_parameters({13, 8});
EXPECT_TRUE(constraints.IsSatisfiedBy(other_possible_tile_parameters));
std::vector<int64_t> impossible_tile_parameters({6, 7});
EXPECT_FALSE(constraints.IsSatisfiedBy(impossible_tile_parameters));
EXPECT_TRUE(ConstraintExpression().IsSatisfiedBy(impossible_tile_parameters));
EXPECT_FALSE(ConstraintExpression::GetUnsatisfiableConstraintExpression()
.IsSatisfiedBy(possible_tile_parameters));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bfa6c823-3cc9-4b21-8649-cad61d016b3f | cpp | tensorflow/tensorflow | analytical_latency_estimator | third_party/xla/xla/service/gpu/model/analytical_latency_estimator.cc | third_party/xla/xla/service/gpu/model/analytical_latency_estimator_test.cc | #include "xla/service/gpu/model/analytical_latency_estimator.h"
#include <memory>
#include <utility>
#include "absl/log/log.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
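// Returns the latency between two nodes of the scheduling graph. Edges that
// start at send/recv ops return kLowLatency when send/recv scheduling is
// disabled; async collective start/done pairs are costed with the analytical
// collective model (in microseconds); everything else falls back to the
// wrapped latency estimator.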
LatencyEstimator::TimeCost AnalyticalLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& target) const {
const HloOpcode from_op = from.GetInstr().opcode();
if (!config_.schedule_send_recvs &&
(from_op == HloOpcode::kSend || from_op == HloOpcode::kRecv)) {
return kLowLatency;
}
if (IsAsyncPair(from, target)) {
double coll_time = absl::ToDoubleMicroseconds(
GpuPerformanceWithCollectiveModel::ComputeCollectiveTime(
from.GetInstr(), &*cost_analysis_, gpu_info_));
VLOG(10) << "Analytical estimator calculated latency between "
<< from.GetInstr().name() << " and " << target.GetInstr().name()
<< " to be: " << coll_time << " us.";
return coll_time;
}
return latency_estimator_->GetLatencyBetween(from, target);
}
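// Returns the cost of executing a single instruction. Async collective
// start/done ops are treated as cheap, since the collective time is accounted
// for by GetLatencyBetween; all other instructions use the GPU performance
// model's estimated execution time, in microseconds.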
LatencyEstimator::TimeCost AnalyticalLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (hlo_query::IsAsyncCollectiveStartOp(instr, true) ||
hlo_query::IsAsyncCollectiveDoneOp(instr, true)) {
return kLowCost;
}
absl::Duration total_estimated_time =
GpuPerformanceModel::EstimateRunTimeForInstruction(
instr, gpu_info_, &*cost_analysis_,
GpuPerformanceModelOptions::ForModule(instr->GetModule()))
.exec_time;
LatencyEstimator::TimeCost cost_in_us =
absl::ToDoubleMicroseconds(total_estimated_time);
VLOG(10) << "Analytical estimator calculated cost for: " << instr->name()
<< ". Cost: " << cost_in_us;
return cost_in_us;
}
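// Runs GpuHloCostAnalysis over `computation` at construction time so that
// per-instruction cost data is available to the estimator.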
AnalyticalLatencyEstimator::AnalyticalLatencyEstimator(
const SchedulerConfig& config,
std::unique_ptr<LatencyEstimator> latency_estimator,
const se::DeviceDescription& gpu_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function,
HloComputation* computation)
: config_(config),
gpu_info_(gpu_info),
latency_estimator_(std::move(latency_estimator)),
shape_size_function_(shape_size_function) {
cost_analysis_.emplace(
GpuHloCostAnalysis::Options{shape_size_function_,
{},
{},
true},
gpu_info_);
TF_CHECK_OK(computation->Accept(&cost_analysis_.value()));
}
}
} | #include "xla/service/gpu/model/analytical_latency_estimator.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
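// Returns the position of the instruction named `hlo_name` within `schedule`.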
int64_t GetInstructionIndexInSchedule(
absl::Span<HloInstruction* const> schedule, absl::string_view hlo_name) {
return std::find_if(schedule.begin(), schedule.end(),
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
schedule.begin();
}
SchedulerConfig GetDefaultSchedulerConfig() {
SchedulerConfig scheduler_config;
return scheduler_config;
}
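// Runs LatencyHidingScheduler on `module` with the given scheduler config and
// latency estimator, using a tuple-aware shape-size function.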
absl::StatusOr<bool> RunScheduler(
HloModule* module, const SchedulerConfig& sched_config,
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>()) {
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
auto async_tracker = std::make_unique<AsyncTracker>(sched_config);
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
bool value, LatencyHidingScheduler(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
class AnalyticalLatencyHidingSchedulerTest : public GpuCodegenTest {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
return ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
};
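// Verifies that with the analytical latency estimator the schedule places
// all-reduce-done.1 before all-reduce-start.2 and conv0 after
// all-reduce-start.2, allowing the convolution to overlap the larger
// all-reduce.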
TEST_F(AnalyticalLatencyHidingSchedulerTest, TestAnalyticalLatencyEstimator) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::PASCAL_)) {
GTEST_SKIP() << "This test is for Pascal+ GPUs.";
}
const se::DeviceDescription dev_info =
backend().default_stream_executor()->GetDeviceDescription();
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
region_20.995 {
Arg_1.997 = f32[] parameter(1)
Arg_0.996 = f32[] parameter(0)
ROOT add.589 = f32[] add(Arg_0.996, Arg_1.997)
}
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
all-reduce-start.1 = f32[1024,2048,2048]{2,1,0} all-reduce-start(p2), channel_id=8, replica_groups={{0}}, to_apply=region_20.995, backend_config="{\"is_sync\":false}"
all-reduce-start.2 = f32[2048,2048,2048]{2,1,0} all-reduce-start(p3), channel_id=10, replica_groups={{0}}, to_apply=region_20.995, backend_config="{\"is_sync\":false}"
all-reduce-done.1 = f32[1024,2048,2048]{2,1,0} all-reduce-done(all-reduce-start.1)
all-reduce-done.2 = f32[2048,2048,2048]{2,1,0} all-reduce-done(all-reduce-start.2)
conv0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(conv0, all-reduce-done.1, all-reduce-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
hlo_module->mutable_config().set_num_partitions(8);
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto scheduler_config = GetDefaultSchedulerConfig();
auto latency_estimator = std::make_unique<AnalyticalLatencyEstimator>(
scheduler_config, std::make_unique<ApproximateLatencyEstimator>(),
dev_info, ShapeSizeBytesFunction(), hlo_module->entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get(), scheduler_config,
std::move(latency_estimator))
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_schedule =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
int64_t ar2_index = GetInstructionIndexInSchedule(new_instruction_schedule,
"all-reduce-start.2");
int64_t ar1_done_index = GetInstructionIndexInSchedule(
new_instruction_schedule, "all-reduce-done.1");
int64_t conv0_index =
GetInstructionIndexInSchedule(new_instruction_schedule, "conv0");
EXPECT_LT(ar1_done_index, ar2_index);
EXPECT_LT(ar2_index, conv0_index);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/analytical_latency_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/analytical_latency_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5df7aab9-5757-4f4b-8261-a448a8554408 | cpp | tensorflow/tensorflow | indexing_map | third_party/xla/xla/service/gpu/model/indexing_map.cc | third_party/xla/xla/service/gpu/model/indexing_map_test.cc | #include "xla/service/gpu/model/indexing_map.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <numeric>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/numeric/int128.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using llvm::ArrayRef;
using llvm::SmallBitVector;
using llvm::SmallVector;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::MLIRContext;
AffineExpr GetLhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getLHS();
}
AffineExpr GetRhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getRHS();
}
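// Rebuilds `expr` by applying `fn` to every summand of a (possibly nested)
// sum; non-sum expressions are passed to `fn` directly.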
template <typename Fn>
AffineExpr MapSummands(AffineExpr expr, const Fn& fn) {
if (expr.getKind() == AffineExprKind::Add) {
auto add = mlir::cast<AffineBinaryOpExpr>(expr);
auto lhs = MapSummands(add.getLHS(), fn);
auto rhs = MapSummands(add.getRHS(), fn);
if (lhs == add.getLHS() && rhs == add.getRHS()) {
return add;
}
return lhs + rhs;
}
return fn(expr);
}
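// Calls `visit` on every leaf summand of a (possibly nested) sum.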
template <typename Fn>
void VisitSummands(mlir::AffineExpr expr, const Fn& visit) {
if (expr.getKind() == AffineExprKind::Add) {
VisitSummands(GetLhs(expr), visit);
VisitSummands(GetRhs(expr), visit);
} else {
visit(expr);
}
}
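// Simplifies affine expressions and affine maps, using a RangeEvaluator to
// bound subexpressions, and simplifies the constraints of an IndexingMap.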
class AffineExprSimplifier {
public:
explicit AffineExprSimplifier(RangeEvaluator* range_evaluator)
: range_evaluator_(range_evaluator),
zero_(getAffineConstantExpr(0, range_evaluator_->GetMLIRContext())) {}
mlir::AffineMap Simplify(mlir::AffineMap affine_map);
mlir::AffineExpr Simplify(mlir::AffineExpr expr);
bool SimplifyConstraintExprs(IndexingMap& map);
bool SimplifyConstraintRanges(IndexingMap& map);
private:
std::optional<int64_t> GetConstantRhs(mlir::AffineExpr expr,
AffineExprKind kind);
std::pair<mlir::AffineExpr, int64_t> ExtractMultiplier(
mlir::AffineExpr expr) {
if (auto mul = GetConstantRhs(expr, AffineExprKind::Mul)) {
return {GetLhs(expr), *mul};
}
return {expr, 1};
}
mlir::AffineExpr RewriteMod(mlir::AffineBinaryOpExpr mod);
mlir::AffineExpr RewriteFloorDiv(mlir::AffineBinaryOpExpr div);
AffineExpr SimplifyModDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifyDivDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifySumDiv(AffineExpr dividend, int64_t divisor);
mlir::AffineExpr RewriteMul(mlir::AffineBinaryOpExpr mul);
mlir::AffineExpr RewriteSum(mlir::AffineBinaryOpExpr sum);
mlir::AffineExpr SimplifyOnce(mlir::AffineExpr expr);
mlir::AffineExpr SimplifyWithMlir(mlir::AffineExpr expr, int num_dims,
int num_symbols);
bool SimplifyConstraintRangeOnce(AffineExpr* expr, Interval* range);
bool SimplifyConstraintRange(AffineExpr* expr, Interval* range);
bool SimplifyAddConstraint(AffineExpr* add, Interval* range);
  std::tuple<AffineExpr /*multiplied*/, int64_t /*multiplier_gcd*/,
             AffineExpr /*not_multiplied*/>
  SplitSumByGcd(AffineExpr sum);
RangeEvaluator* range_evaluator_;
AffineExpr zero_;
};
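// Simplifies `lhs mod m` for a constant modulus m: drops the mod when the
// (suitably shifted) range of the lhs fits inside one period, removes summands
// whose multipliers are divisible by m, and factors common multipliers out of
// the remaining sum.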
AffineExpr AffineExprSimplifier::RewriteMod(AffineBinaryOpExpr mod) {
auto rhs = range_evaluator_->ComputeExpressionRange(mod.getRHS());
if (!rhs.IsPoint()) {
return mod;
}
int64_t m = rhs.lower;
if (m == 0) {
return zero_;
}
auto lhs_simplified = SimplifyOnce(mod.getLHS());
auto lhs = range_evaluator_->ComputeExpressionRange(lhs_simplified);
int64_t offset = llvm::divideFloorSigned(lhs.lower, m) * -m;
if (lhs.upper + offset < m) {
return lhs_simplified + offset;
}
if (auto mul = GetConstantRhs(lhs_simplified, AffineExprKind::Mul);
mul && *mul > 0 && (m % *mul == 0)) {
return (GetLhs(lhs_simplified) % (m / *mul)) * *mul;
}
int64_t extracted_constant = 0;
auto new_lhs = MapSummands(lhs_simplified, [&](AffineExpr expr) {
if (auto cst = mlir::dyn_cast<AffineConstantExpr>(expr)) {
extracted_constant += cst.getValue();
return zero_;
}
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul);
multiplier && (*multiplier % m == 0)) {
return zero_;
}
return expr;
});
if (extracted_constant % m != 0) {
new_lhs = new_lhs + (extracted_constant % m);
}
auto [multiplied, multiplier_gcd, not_multiplied] = SplitSumByGcd(new_lhs);
if (multiplier_gcd != 1 && m % multiplier_gcd == 0) {
auto not_multiplied_range =
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (not_multiplied_range == Interval{0, 0}) {
int64_t multiplier_mod_gcd = std::gcd(multiplier_gcd, m);
if (multiplier_mod_gcd == multiplier_gcd) {
new_lhs = multiplied;
} else if (multiplier_mod_gcd > 1) {
new_lhs = MapSummands(
multiplied, [&, multiplier_gcd = multiplier_gcd](AffineExpr expr) {
return expr * (multiplier_gcd / multiplier_mod_gcd);
});
}
return (new_lhs % (m / multiplier_mod_gcd)) * multiplier_mod_gcd;
} else if (Interval{0, multiplier_gcd - 1}.Contains(not_multiplied_range)) {
new_lhs = multiplied * multiplier_gcd;
return new_lhs % mod.getRHS() + not_multiplied;
}
}
return new_lhs == mod.getLHS() ? mod : (new_lhs % m);
}
AffineExpr AffineExprSimplifier::SimplifyModDiv(AffineExpr dividend,
int64_t divisor) {
if (auto mod = GetConstantRhs(dividend, AffineExprKind::Mod);
mod && (*mod % divisor == 0)) {
return GetLhs(dividend).floorDiv(divisor) % (*mod / divisor);
}
return nullptr;
}
AffineExpr AffineExprSimplifier::SimplifyDivDiv(AffineExpr dividend,
int64_t divisor) {
if (auto inner_divisor = GetConstantRhs(dividend, AffineExprKind::FloorDiv)) {
return GetLhs(dividend).floorDiv(divisor * *inner_divisor);
}
return nullptr;
}
AffineExpr AffineExprSimplifier::SimplifySumDiv(AffineExpr dividend,
int64_t divisor) {
AffineExpr extracted = zero_;
auto new_dividend = MapSummands(dividend, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (*multiplier % divisor == 0) {
int64_t factor = *multiplier / divisor;
extracted = extracted + GetLhs(expr) * factor;
return zero_;
}
}
return expr;
});
auto [multiplied, multiplier_gcd, not_multiplied] =
SplitSumByGcd(new_dividend);
int64_t multiplier_divisor_gcd = std::gcd(divisor, multiplier_gcd);
auto no_multiplier_range =
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (multiplier_divisor_gcd != 1 &&
Interval{0, multiplier_divisor_gcd - 1}.Contains(no_multiplier_range)) {
new_dividend = multiplied * (multiplier_gcd / multiplier_divisor_gcd);
divisor /= multiplier_divisor_gcd;
} else if (no_multiplier_range.IsPoint() && no_multiplier_range.lower != 0) {
multiplier_divisor_gcd =
std::gcd(no_multiplier_range.lower, multiplier_divisor_gcd);
if (multiplier_divisor_gcd != 1) {
new_dividend = multiplied * (multiplier_gcd / multiplier_divisor_gcd) +
(no_multiplier_range.lower / multiplier_divisor_gcd);
divisor /= multiplier_divisor_gcd;
}
}
std::optional<int64_t> inner_divisor = std::nullopt;
int num_inner_divisors = 0;
VisitSummands(new_dividend, [&](AffineExpr summand) {
if (auto divisor = GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
inner_divisor = divisor;
++num_inner_divisors;
}
});
if (num_inner_divisors == 1) {
new_dividend = MapSummands(new_dividend, [&](AffineExpr summand) {
if (auto inner_divisor =
GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
return GetLhs(summand);
}
return summand * *inner_divisor;
});
divisor *= *inner_divisor;
}
if (new_dividend != dividend) {
return new_dividend.floorDiv(divisor) + extracted;
}
return nullptr;
}
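// Simplifies `lhs floordiv d` for a constant divisor d > 1 by trying the
// mod-div, div-div and sum-div rewrites above.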
AffineExpr AffineExprSimplifier::RewriteFloorDiv(AffineBinaryOpExpr div) {
auto rhs_range = range_evaluator_->ComputeExpressionRange(div.getRHS());
auto lhs_simplified = SimplifyOnce(div.getLHS());
if (!rhs_range.IsPoint()) {
return lhs_simplified.floorDiv(SimplifyOnce(div.getRHS()));
}
int64_t d = rhs_range.lower;
if (d > 1) {
if (auto result = SimplifyModDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifyDivDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifySumDiv(lhs_simplified, d)) {
return result;
}
}
return lhs_simplified != div.getLHS() ? lhs_simplified.floorDiv(d) : div;
}
mlir::AffineExpr AffineExprSimplifier::RewriteMul(
mlir::AffineBinaryOpExpr mul) {
auto rhs_range = range_evaluator_->ComputeExpressionRange(mul.getRHS());
if (!rhs_range.IsPoint()) {
return mul;
}
int64_t multiplier = rhs_range.lower;
auto lhs = SimplifyOnce(mul.getLHS());
if (lhs.getKind() == AffineExprKind::Add) {
return MapSummands(
lhs, [&](AffineExpr summand) { return summand * rhs_range.lower; });
}
if (multiplier == 1) {
return lhs;
}
if (lhs == mul.getLHS()) {
return mul;
}
return lhs * multiplier;
}
std::optional<int64_t> AffineExprSimplifier::GetConstantRhs(
AffineExpr expr, AffineExprKind kind) {
if (expr.getKind() != kind) {
return std::nullopt;
}
auto bound = range_evaluator_->ComputeExpressionRange(
mlir::cast<AffineBinaryOpExpr>(expr).getRHS());
if (!bound.IsPoint()) {
return std::nullopt;
}
return bound.lower;
}
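// Total ordering over affine expressions used to canonicalize the order of
// summands; constants sort last.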
int CompareExprs(AffineExpr a, AffineExpr b) {
if ((b.getKind() == AffineExprKind::Constant) !=
(a.getKind() == AffineExprKind::Constant)) {
return a.getKind() == AffineExprKind::Constant ? 1 : -1;
}
if (a.getKind() < b.getKind()) {
return -1;
}
if (a.getKind() > b.getKind()) {
return 1;
}
assert(a.getKind() == b.getKind());
int64_t a_value = 0;
int64_t b_value = 0;
switch (a.getKind()) {
case AffineExprKind::Add:
case AffineExprKind::FloorDiv:
case AffineExprKind::CeilDiv:
case AffineExprKind::Mul:
case AffineExprKind::Mod: {
auto lhs = CompareExprs(GetLhs(a), GetLhs(b));
if (lhs != 0) {
return lhs;
}
return CompareExprs(GetRhs(a), GetRhs(b));
}
case AffineExprKind::Constant: {
a_value = mlir::cast<AffineConstantExpr>(a).getValue();
b_value = mlir::cast<AffineConstantExpr>(b).getValue();
break;
}
case AffineExprKind::SymbolId: {
a_value = mlir::cast<AffineSymbolExpr>(a).getPosition();
b_value = mlir::cast<AffineSymbolExpr>(b).getPosition();
break;
}
case AffineExprKind::DimId: {
a_value = mlir::cast<AffineDimExpr>(a).getPosition();
b_value = mlir::cast<AffineDimExpr>(b).getPosition();
break;
}
}
return a_value < b_value ? -1 : (a_value > b_value ? 1 : 0);
}
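// Simplifies a sum: simplifies each summand, merges matching
// `a * (x mod c) + (a * c) * (x floordiv c)` pairs back into `a * x`, combines
// multipliers of equal summands, and re-emits the summands in canonical order.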
mlir::AffineExpr AffineExprSimplifier::RewriteSum(
mlir::AffineBinaryOpExpr sum) {
  SmallVector<std::pair<AffineExpr, int64_t /*multiplier*/>> mods;
  SmallVector<std::pair<AffineExpr, int64_t /*multiplier*/>> divs;
  llvm::SmallDenseMap<AffineExpr, int64_t /*multiplier*/> summands;
VisitSummands(sum, [&](AffineExpr expr) {
AffineExpr simplified = SimplifyOnce(expr);
auto [lhs, multiplier] = ExtractMultiplier(simplified);
if (lhs.getKind() == AffineExprKind::Mod) {
mods.push_back({lhs, multiplier});
} else if (lhs.getKind() == AffineExprKind::FloorDiv) {
divs.push_back({lhs, multiplier});
} else {
summands[lhs] += multiplier;
}
});
if (mods.size() * divs.size() >= 100) {
std::string s;
llvm::raw_string_ostream ss(s);
ss << sum;
LOG(WARNING) << "Unexpectedly large number of mods and divs in " << s
<< ". Please open an issue on GitHub at "
<< "https:
}
if (!divs.empty()) {
for (int mod_i = 0; mod_i < mods.size(); ++mod_i) {
auto [mod, mod_mul] = mods[mod_i];
auto mod_c = GetConstantRhs(mod, AffineExprKind::Mod);
if (!mod_c) continue;
AffineExpr simplified_mod = Simplify(GetLhs(mod).floorDiv(*mod_c));
for (int div_i = 0; div_i < divs.size(); ++div_i) {
auto [div, div_mul] = divs[div_i];
if (simplified_mod != div) continue;
if ((div_mul % mod_mul) || (div_mul / mod_mul) != mod_c) continue;
summands[GetLhs(mod)] += mod_mul;
divs[div_i].first = nullptr;
mods[mod_i].first = nullptr;
break;
}
}
for (int div_i = 0; div_i < divs.size(); ++div_i) {
auto [div, div_mul] = divs[div_i];
if (!div || div_mul > 0) continue;
auto div_c = GetConstantRhs(div, AffineExprKind::FloorDiv);
if (!div_c || *div_c < 0 || (div_mul % *div_c)) continue;
int64_t b = div_mul / *div_c;
auto x = GetLhs(div);
VisitSummands(x, [&](AffineExpr summand) { summands[summand] += b; });
mods.push_back({x % *div_c, -b});
divs[div_i].first = nullptr;
}
}
for (auto [expr, mul] : mods) {
if (expr) {
summands[expr] += mul;
}
}
for (auto [expr, mul] : divs) {
if (expr) {
summands[expr] += mul;
}
}
SmallVector<AffineExpr, 4> expanded_summands;
for (auto [expr, mul] : summands) {
expanded_summands.push_back(expr * mul);
}
llvm::sort(expanded_summands,
[](AffineExpr a, AffineExpr b) { return CompareExprs(a, b) < 0; });
AffineExpr result = zero_;
for (auto expr : expanded_summands) {
result = result + expr;
}
return result;
}
AffineExpr AffineExprSimplifier::SimplifyOnce(AffineExpr expr) {
if (expr.getKind() == AffineExprKind::Constant) {
return expr;
}
auto bounds = range_evaluator_->ComputeExpressionRange(expr);
if (bounds.IsPoint()) {
return getAffineConstantExpr(bounds.lower,
range_evaluator_->GetMLIRContext());
}
switch (expr.getKind()) {
case AffineExprKind::Mul:
return RewriteMul(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::Add:
return RewriteSum(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::Mod:
return RewriteMod(mlir::cast<AffineBinaryOpExpr>(expr));
case AffineExprKind::FloorDiv:
return RewriteFloorDiv(mlir::cast<AffineBinaryOpExpr>(expr));
default:
return expr;
}
}
AffineExpr AffineExprSimplifier::Simplify(AffineExpr expr) {
while (true) {
auto simplified = SimplifyOnce(expr);
if (simplified == expr) {
return expr;
}
expr = simplified;
}
}
AffineMap AffineExprSimplifier::Simplify(AffineMap affine_map) {
SmallVector<AffineExpr, 4> results;
results.reserve(affine_map.getNumResults());
for (AffineExpr expr : affine_map.getResults()) {
results.push_back(Simplify(expr));
}
return AffineMap::get(affine_map.getNumDims(), affine_map.getNumSymbols(),
results, affine_map.getContext());
}
bool AffineExprSimplifier::SimplifyAddConstraint(AffineExpr* add,
Interval* range) {
if (add->getKind() != AffineExprKind::Add) {
return false;
}
auto rhs_range = range_evaluator_->ComputeExpressionRange(GetRhs(*add));
if (rhs_range.IsPoint()) {
*add = GetLhs(*add);
range->lower -= rhs_range.lower;
range->upper -= rhs_range.lower;
return true;
}
if (range->lower != 0) {
return false;
}
auto [multiplied, multiplier_gcd, not_multiplied] = SplitSumByGcd(*add);
if (multiplier_gcd == 1) {
return false;
}
Interval difference_range =
Interval{range->upper, range->upper} -
range_evaluator_->ComputeExpressionRange(not_multiplied);
if (!difference_range.FloorDiv(multiplier_gcd).IsPoint()) {
return false;
}
*add = multiplied * multiplier_gcd;
return true;
}
bool AffineExprSimplifier::SimplifyConstraintRangeOnce(AffineExpr* expr,
Interval* range) {
switch (expr->getKind()) {
case AffineExprKind::DimId:
case AffineExprKind::SymbolId:
case AffineExprKind::Constant: {
return false;
}
case AffineExprKind::Add:
return SimplifyAddConstraint(expr, range);
default: {
auto binary_op = mlir::cast<AffineBinaryOpExpr>(*expr);
CHECK(binary_op);
auto lhs = binary_op.getLHS();
auto rhs_range = range_evaluator_->ComputeExpressionRange(GetRhs(*expr));
if (!rhs_range.IsPoint()) {
return false;
}
int64_t rhs_cst = rhs_range.lower;
switch (expr->getKind()) {
case AffineExprKind::Mul: {
int64_t factor = rhs_cst;
if (factor < 0) {
factor *= -1;
range->lower *= -1;
range->upper *= -1;
std::swap(range->lower, range->upper);
}
range->lower = llvm::divideCeilSigned(range->lower, factor);
range->upper = llvm::divideFloorSigned(range->upper, factor);
*expr = lhs;
return true;
}
case AffineExprKind::FloorDiv: {
int64_t divisor = rhs_cst;
if (divisor < 0) {
divisor *= -1;
range->lower *= -1;
range->upper *= -1;
std::swap(range->lower, range->upper);
}
range->lower *= divisor;
range->upper = (range->upper + 1) * divisor - 1;
*expr = lhs;
return true;
}
default: {
return false;
}
}
}
}
}
bool AffineExprSimplifier::SimplifyConstraintRange(AffineExpr* expr,
Interval* range) {
bool is_simplified = false;
while (SimplifyConstraintRangeOnce(expr, range)) {
is_simplified = true;
}
return is_simplified;
}
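// Computes the symbol replacements used after composing two indexing maps so
// that the range variables of both maps precede the runtime (RT) variables.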
SmallVector<AffineExpr, 4> GetComposedSymbolsPermutationToCorrectOrder(
const IndexingMap& first, const IndexingMap& second) {
if (second.GetRTVarsCount() == 0) {
return {};
}
SmallVector<AffineExpr, 4> symbol_replacements;
MLIRContext* mlir_context = first.GetMLIRContext();
for (int id = 0; id < second.GetRangeVarsCount(); ++id) {
symbol_replacements.push_back(getAffineSymbolExpr(id, mlir_context));
}
int64_t first_range_vars_count = first.GetRangeVarsCount();
int64_t second_range_vars_count = second.GetRangeVarsCount();
int64_t first_rt_vars_count = first.GetRTVarsCount();
int64_t second_rt_vars_count = second.GetRTVarsCount();
int64_t rt_vars_second_start =
first_range_vars_count + second_range_vars_count;
for (int64_t id = 0; id < second_rt_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_second_start++, mlir_context));
}
int64_t range_vars_first_start = second_range_vars_count;
for (int64_t id = 0; id < first_range_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(range_vars_first_start++, mlir_context));
}
int64_t rt_vars_first_start =
first_range_vars_count + second_range_vars_count + second_rt_vars_count;
for (int64_t id = 0; id < first_rt_vars_count; ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_first_start++, mlir_context));
}
return symbol_replacements;
}
SmallVector<AffineExpr, 4> MapSymbolsToComposedSymbolsList(
const IndexingMap& map, const IndexingMap& composed) {
SmallVector<AffineExpr, 4> symbol_replacements;
MLIRContext* mlir_context = map.GetMLIRContext();
int64_t range_vars_start =
composed.GetRangeVarsCount() - map.GetRangeVarsCount();
for (int64_t id = 0; id < map.GetRangeVarsCount(); ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(range_vars_start++, mlir_context));
}
int64_t rt_vars_start = composed.GetSymbolCount() - map.GetRTVarsCount();
for (int64_t id = 0; id < map.GetRTVarsCount(); ++id) {
symbol_replacements.push_back(
getAffineSymbolExpr(rt_vars_start++, mlir_context));
}
return symbol_replacements;
}
}
static constexpr std::string_view kVarKindDefault = "default";
static constexpr std::string_view kVarKindThreadX = "th_x";
static constexpr std::string_view kVarKindThreadY = "th_y";
static constexpr std::string_view kVarKindThreadZ = "th_z";
static constexpr std::string_view kVarKindBlockX = "bl_x";
static constexpr std::string_view kVarKindBlockY = "bl_y";
static constexpr std::string_view kVarKindBlockZ = "bl_z";
static constexpr std::string_view kVarKindWarp = "warp";
static constexpr std::string_view kVarKindWarpThread = "th_w";
std::string_view ToVariableName(VariableKind var_kind) {
switch (var_kind) {
case VariableKind::kDefault:
return kVarKindDefault;
case VariableKind::kThreadX:
return kVarKindThreadX;
case VariableKind::kThreadY:
return kVarKindThreadY;
case VariableKind::kThreadZ:
return kVarKindThreadZ;
case VariableKind::kBlockX:
return kVarKindBlockX;
case VariableKind::kBlockY:
return kVarKindBlockY;
case VariableKind::kBlockZ:
return kVarKindBlockZ;
case VariableKind::kWarp:
return kVarKindWarp;
case VariableKind::kWarpThread:
return kVarKindWarpThread;
}
llvm_unreachable("Unknown VariableType");
}
VariableKind ToVariableType(std::string_view var_name) {
if (var_name == kVarKindThreadX) return VariableKind::kThreadX;
if (var_name == kVarKindThreadY) return VariableKind::kThreadY;
if (var_name == kVarKindThreadZ) return VariableKind::kThreadZ;
if (var_name == kVarKindBlockX) return VariableKind::kBlockX;
if (var_name == kVarKindBlockY) return VariableKind::kBlockY;
if (var_name == kVarKindBlockZ) return VariableKind::kBlockZ;
if (var_name == kVarKindWarp) return VariableKind::kWarp;
if (var_name == kVarKindWarpThread) return VariableKind::kWarpThread;
return VariableKind::kDefault;
}
std::ostream& operator<<(std::ostream& out, VariableKind var_type) {
out << ToVariableName(var_type);
return out;
}
std::ostream& operator<<(std::ostream& out, const Interval& interval) {
out << absl::StrFormat("[%d, %d]", interval.lower, interval.upper);
return out;
}
std::string Interval::ToString() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
inline llvm::raw_ostream& operator<<(llvm::raw_ostream& os,
const Interval& interval) {
os << absl::StrFormat("[%d, %d]", interval.lower, interval.upper);
return os;
}
int64_t Interval::GetLoopTripCount() const {
if (!IsFeasible()) {
return 0;
}
DCHECK((static_cast<absl::int128>(upper) - lower + 1) <=
std::numeric_limits<int64_t>::max());
return upper - lower + 1;
}
Interval::ComparisonResult Interval::Gt(const Interval& b) const {
if (!IsFeasible() || !b.IsFeasible()) {
return {std::nullopt};
}
if (lower > b.upper) {
return {true};
}
if (upper <= b.lower) {
return {false};
}
return {std::nullopt};
}
Interval::ComparisonResult Interval::Eq(const Interval& b) const {
Interval intersection = Intersect(b);
if (!intersection.IsFeasible()) return {false};
if (intersection.IsPoint() && IsPoint() && b.IsPoint()) {
return {true};
}
return {std::nullopt};
}
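// Interval addition with saturation: results that would overflow int64 are
// clamped to the representable range.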
Interval Interval::operator+(const Interval& rhs) const {
int64_t out_lower;
int64_t out_upper;
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
bool lower_overflow = llvm::AddOverflow(lower, rhs.lower, out_lower);
bool upper_overflow = llvm::AddOverflow(upper, rhs.upper, out_upper);
if (lower_overflow || lower == kMin || rhs.lower == kMin) {
if (lower < 0 || rhs.lower < 0) {
out_lower = kMin;
} else {
out_lower = kMax;
out_upper = kMax;
}
}
if (upper_overflow || upper == kMax || rhs.upper == kMax) {
if (upper > 0 || rhs.upper > 0) {
out_upper = kMax;
} else {
out_upper = kMin;
out_lower = kMin;
}
}
return {out_lower, out_upper};
}
Interval Interval::operator*(const Interval& rhs) const {
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
auto mul = [&](int64_t p) {
int64_t l = lower;
int64_t u = upper;
if (p < 0) {
std::swap(l, u);
}
int64_t out_lower;
int64_t out_upper;
if (llvm::MulOverflow(l, p, out_lower) ||
(p == -1 && l == kMax)) {
out_lower = kMin;
}
if (llvm::MulOverflow(u, p, out_upper)) {
out_upper = kMax;
}
return Interval{out_lower, out_upper};
};
return mul(rhs.lower).Union(mul(rhs.upper));
}
Interval Interval::operator-() const {
int64_t ub = lower == std::numeric_limits<int64_t>::min()
? std::numeric_limits<int64_t>::max()
: -lower;
int64_t lb = upper == std::numeric_limits<int64_t>::max()
? std::numeric_limits<int64_t>::min()
: -upper;
return Interval{lb, ub};
}
Interval Interval::FloorDiv(int64_t rhs) const {
auto saturate_div = [](int64_t lhs, int64_t rhs) {
constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
if (lhs == kMin) {
return rhs > 0 ? kMin : kMax;
}
if (lhs == kMax) {
return rhs > 0 ? kMax : kMin;
}
return llvm::divideFloorSigned(lhs, rhs);
};
int64_t a = saturate_div(lower, rhs);
int64_t b = saturate_div(upper, rhs);
return {std::min(a, b), std::max(a, b)};
}
bool operator==(const IndexingMap::Variable& lhs,
const IndexingMap::Variable& rhs) {
return lhs.bounds == rhs.bounds;
}
std::vector<IndexingMap::Variable> DimVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes) {
std::vector<IndexingMap::Variable> ranges;
ranges.reserve(tensor_sizes.size());
for (int64_t size : tensor_sizes) {
ranges.push_back(IndexingMap::Variable{0, size - 1});
}
return ranges;
}
std::vector<IndexingMap::Variable> DimVarsFromGPUGrid(
absl::Span<const int64_t> grid_sizes) {
CHECK_EQ(grid_sizes.size(), 6)
<< "Grid must be 6-dimensional (th_x, th_y, th_z, bl_x, bl_y, bl_z)";
return {
IndexingMap::Variable{0, grid_sizes[0] - 1, kVarKindThreadX},
IndexingMap::Variable{0, grid_sizes[1] - 1, kVarKindThreadY},
IndexingMap::Variable{0, grid_sizes[2] - 1, kVarKindThreadZ},
IndexingMap::Variable{0, grid_sizes[3] - 1, kVarKindBlockX},
IndexingMap::Variable{0, grid_sizes[4] - 1, kVarKindBlockY},
IndexingMap::Variable{0, grid_sizes[5] - 1, kVarKindBlockZ},
};
}
std::vector<IndexingMap::Variable> RangeVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes) {
std::vector<IndexingMap::Variable> ranges;
ranges.reserve(tensor_sizes.size());
for (int64_t size : tensor_sizes) {
ranges.push_back({IndexingMap::Variable{0, size - 1}});
}
return ranges;
}
IndexingMap::IndexingMap(
AffineMap affine_map, std::vector<IndexingMap::Variable> dimensions,
std::vector<IndexingMap::Variable> range_vars,
std::vector<IndexingMap::Variable> rt_vars,
absl::Span<std::pair<AffineExpr, Interval> const> constraints)
: affine_map_(affine_map),
dim_vars_(std::move(dimensions)),
range_vars_(std::move(range_vars)),
rt_vars_(std::move(rt_vars)) {
if (!VerifyVariableIntervals()) {
ResetToKnownEmpty();
return;
}
for (const auto& [expr, range] : constraints) {
AddConstraint(expr, range);
}
}
IndexingMap::IndexingMap(
AffineMap affine_map, std::vector<IndexingMap::Variable> dimensions,
std::vector<IndexingMap::Variable> range_vars,
std::vector<IndexingMap::Variable> rt_vars,
const llvm::DenseMap<AffineExpr, Interval>& constraints)
: affine_map_(affine_map),
dim_vars_(std::move(dimensions)),
range_vars_(std::move(range_vars)),
rt_vars_(std::move(rt_vars)),
constraints_(constraints) {
if (!VerifyVariableIntervals() || !VerifyConstraintIntervals()) {
ResetToKnownEmpty();
return;
}
}
IndexingMap IndexingMap::FromTensorSizes(
AffineMap affine_map, absl::Span<const int64_t> dim_upper_bounds,
absl::Span<const int64_t> symbol_upper_bounds) {
return IndexingMap{affine_map, DimVarsFromTensorSizes(dim_upper_bounds),
RangeVarsFromTensorSizes(symbol_upper_bounds),
                     /*rt_vars=*/{}};
}
RangeEvaluator IndexingMap::GetRangeEvaluator() const {
return RangeEvaluator(*this, GetMLIRContext());
}
const Interval& IndexingMap::GetDimensionBound(int64_t dim_id) const {
return dim_vars_[dim_id].bounds;
}
Interval& IndexingMap::GetMutableDimensionBound(int64_t dim_id) {
return dim_vars_[dim_id].bounds;
}
std::vector<Interval> IndexingMap::GetDimensionBounds() const {
std::vector<Interval> bounds;
bounds.reserve(affine_map_.getNumDims());
for (const auto& dim : dim_vars_) {
bounds.push_back(dim.bounds);
}
return bounds;
}
const Interval& IndexingMap::GetSymbolBound(int64_t symbol_id) const {
int64_t range_var_count = GetRangeVarsCount();
return symbol_id < range_var_count
? range_vars_[symbol_id].bounds
: rt_vars_[symbol_id - range_var_count].bounds;
}
Interval& IndexingMap::GetMutableSymbolBound(int64_t symbol_id) {
int64_t range_var_count = GetRangeVarsCount();
return symbol_id < range_var_count
? range_vars_[symbol_id].bounds
: rt_vars_[symbol_id - range_var_count].bounds;
}
std::vector<Interval> IndexingMap::GetSymbolBounds() const {
std::vector<Interval> bounds;
bounds.reserve(affine_map_.getNumSymbols());
for (const auto& range_var : range_vars_) {
bounds.push_back(range_var.bounds);
}
for (const auto& rt_var : rt_vars_) {
bounds.push_back(rt_var.bounds);
}
return bounds;
}
void IndexingMap::AddConstraint(mlir::AffineExpr expr, Interval range) {
if (IsKnownEmpty()) {
return;
}
if (!range.IsFeasible()) {
ResetToKnownEmpty();
return;
}
if (auto dim_expr = mlir::dyn_cast<AffineDimExpr>(expr)) {
Interval& current_range = GetMutableDimensionBound(dim_expr.getPosition());
current_range = current_range.Intersect(range);
if (!current_range.IsFeasible()) ResetToKnownEmpty();
return;
}
if (auto symbol_expr = mlir::dyn_cast<AffineSymbolExpr>(expr)) {
Interval& current_range = GetMutableSymbolBound(symbol_expr.getPosition());
current_range = current_range.Intersect(range);
if (!current_range.IsFeasible()) ResetToKnownEmpty();
return;
}
if (auto constant_expr = mlir::dyn_cast<AffineConstantExpr>(expr)) {
if (!range.Contains(constant_expr.getValue())) {
ResetToKnownEmpty();
}
return;
}
auto [it, inserted] = constraints_.insert({expr, range});
if (!inserted) {
it->second = it->second.Intersect(range);
if (!it->second.IsFeasible()) {
ResetToKnownEmpty();
}
}
}
void IndexingMap::EraseConstraint(mlir::AffineExpr expr) {
constraints_.erase(expr);
}
bool IndexingMap::ConstraintsSatisfied(
ArrayRef<AffineExpr> dim_const_exprs,
ArrayRef<AffineExpr> symbol_const_exprs) const {
CHECK(dim_const_exprs.size() == affine_map_.getNumDims());
CHECK(symbol_const_exprs.size() == affine_map_.getNumSymbols());
if (IsKnownEmpty()) {
return false;
}
for (auto& [expr, range] : constraints_) {
int64_t expr_value =
mlir::cast<AffineConstantExpr>(
expr.replaceDimsAndSymbols(dim_const_exprs, symbol_const_exprs))
.getValue();
if (expr_value < range.lower || expr_value > range.upper) {
return false;
}
}
return true;
}
SmallVector<int64_t, 4> IndexingMap::Evaluate(
ArrayRef<AffineExpr> dim_const_exprs,
ArrayRef<AffineExpr> symbol_const_exprs) const {
CHECK(dim_const_exprs.size() == GetDimensionCount());
CHECK(symbol_const_exprs.size() == GetSymbolCount());
AffineMap eval = affine_map_.replaceDimsAndSymbols(
dim_const_exprs, symbol_const_exprs, dim_const_exprs.size(),
symbol_const_exprs.size());
return eval.getConstantResults();
}
bool IndexingMap::IsSymbolConstrained(int64_t symbol_id) const {
for (const auto& [expr, _] : constraints_) {
bool result = false;
expr.walk([&](mlir::AffineExpr leaf) {
auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(leaf);
if (sym && sym.getPosition() == symbol_id) {
result = true;
}
});
if (result) return true;
}
return false;
}
RangeEvaluator::RangeEvaluator(const IndexingMap& indexing_map,
MLIRContext* mlir_context, bool use_constraints)
: mlir_context_(mlir_context),
indexing_map_(indexing_map),
use_constraints_(use_constraints) {}
bool RangeEvaluator::IsAlwaysPositiveOrZero(mlir::AffineExpr expr) {
return ComputeExpressionRange(expr).lower >= 0;
}
bool RangeEvaluator::IsAlwaysNegativeOrZero(mlir::AffineExpr expr) {
return ComputeExpressionRange(expr).upper <= 0;
}
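// Recursively bounds an affine expression using the dimension and symbol
// bounds of the indexing map; when constraints are enabled, the result is
// intersected with any matching constraint.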
Interval RangeEvaluator::ComputeExpressionRange(AffineExpr expr) {
switch (expr.getKind()) {
case AffineExprKind::Constant: {
int64_t value = mlir::cast<AffineConstantExpr>(expr).getValue();
return Interval{value, value};
}
case AffineExprKind::DimId:
return indexing_map_.GetDimensionBound(
mlir::cast<AffineDimExpr>(expr).getPosition());
case AffineExprKind::SymbolId:
return indexing_map_.GetSymbolBound(
mlir::cast<AffineSymbolExpr>(expr).getPosition());
default:
break;
}
auto binary_op = mlir::dyn_cast<AffineBinaryOpExpr>(expr);
CHECK(binary_op);
auto lhs = ComputeExpressionRange(binary_op.getLHS());
auto rhs = ComputeExpressionRange(binary_op.getRHS());
Interval result;
switch (expr.getKind()) {
case AffineExprKind::Add:
result = lhs + rhs;
break;
case AffineExprKind::Mul:
result = lhs * rhs;
break;
case AffineExprKind::Mod: {
CHECK(rhs.IsPoint()) << "RHS of mod must be a constant";
int64_t m = rhs.lower;
if (0 <= lhs.lower && lhs.upper < m) {
result = lhs;
} else {
result = {0, m - 1};
}
break;
}
case AffineExprKind::FloorDiv: {
CHECK(rhs.IsPoint()) << "RHS of floor_div must be a constant";
int64_t d = rhs.lower;
int64_t a = llvm::divideFloorSigned(lhs.lower, d);
int64_t b = llvm::divideFloorSigned(lhs.upper, d);
result = {std::min(a, b), std::max(a, b)};
break;
}
default:
LOG(FATAL) << "Unsupported expression";
}
if (use_constraints_) {
auto constraint = indexing_map_.GetConstraints().find(expr);
if (constraint != indexing_map_.GetConstraints().end()) {
return result.Intersect(constraint->second);
}
}
return result;
}
MLIRContext* IndexingMap::GetMLIRContext() const {
return IsUndefined() ? nullptr : affine_map_.getContext();
}
bool operator==(const IndexingMap& lhs, const IndexingMap& rhs) {
return lhs.GetAffineMap() == rhs.GetAffineMap() &&
lhs.GetDimVars() == rhs.GetDimVars() &&
lhs.GetRangeVars() == rhs.GetRangeVars() &&
lhs.GetRTVars() == rhs.GetRTVars() &&
lhs.GetConstraints() == rhs.GetConstraints();
}
IndexingMap operator*(const IndexingMap& lhs, const IndexingMap& rhs) {
return ComposeIndexingMaps(lhs, rhs);
}
bool IndexingMap::Verify(std::ostream& out) const {
if (IsUndefined()) {
return true;
}
if (affine_map_.getNumDims() != dim_vars_.size()) {
out << "dim size must match the number of dimensions in "
"the affine map";
return false;
}
if (affine_map_.getNumSymbols() != range_vars_.size() + rt_vars_.size()) {
out << "range vars size + rt var size must match the number of "
"symbols in the affine map";
return false;
}
return true;
}
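// Simplifies the constraints (expressions and ranges) to a fixed point, merges
// mod constraints, then simplifies the affine map itself. Returns true if
// anything changed.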
bool IndexingMap::Simplify() {
if (IsUndefined() || IsKnownEmpty()) return false;
bool constraints_were_simplified = false;
RangeEvaluator constraint_range_evaluator(*this, GetMLIRContext(),
                                            /*use_constraints=*/false);
AffineExprSimplifier constraint_simplifier(&constraint_range_evaluator);
while (true) {
bool did_simplify = false;
did_simplify |= constraint_simplifier.SimplifyConstraintExprs(*this);
did_simplify |= constraint_simplifier.SimplifyConstraintRanges(*this);
if (!did_simplify) {
break;
}
constraints_were_simplified = true;
}
constraints_were_simplified |= MergeModConstraints();
RangeEvaluator range_evaluator(*this, GetMLIRContext(),
                                 /*use_constraints=*/true);
AffineMap simplified_affine_map =
AffineExprSimplifier(&range_evaluator).Simplify(affine_map_);
bool affine_map_was_simplified = simplified_affine_map != affine_map_;
if (affine_map_was_simplified) {
affine_map_ = simplified_affine_map;
}
return affine_map_was_simplified || constraints_were_simplified;
}
bool AffineExprSimplifier::SimplifyConstraintExprs(IndexingMap& map) {
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : map.GetConstraints()) {
AffineExpr simplified = Simplify(expr);
Interval evaluated_range =
range_evaluator_->ComputeExpressionRange(simplified);
if (evaluated_range.upper <= range.upper &&
evaluated_range.lower >= range.lower) {
to_remove.push_back(expr);
continue;
}
if (simplified == expr) continue;
to_add.push_back({simplified, range});
to_remove.push_back(expr);
}
for (const auto& expr : to_remove) {
map.EraseConstraint(expr);
}
for (const auto& [expr, range] : to_add) {
map.AddConstraint(expr, range);
}
return !to_add.empty();
}
bool AffineExprSimplifier::SimplifyConstraintRanges(IndexingMap& map) {
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : map.GetConstraints()) {
AffineExpr simplified_expr = expr;
Interval simplified_range = range;
if (SimplifyConstraintRange(&simplified_expr, &simplified_range)) {
to_add.push_back({simplified_expr, simplified_range});
to_remove.push_back(expr);
}
}
for (const auto& expr : to_remove) {
map.EraseConstraint(expr);
}
for (const auto& [expr, range] : to_add) {
map.AddConstraint(expr, range);
}
return !to_add.empty();
}
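// Splits a sum into `scaled * gcd + not_multiplied`, where `gcd` is the
// greatest common divisor of all constant multipliers and `not_multiplied`
// collects the summands without a constant multiplier.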
std::tuple<AffineExpr, int64_t, AffineExpr> AffineExprSimplifier::SplitSumByGcd(
AffineExpr sum) {
std::optional<int64_t> multiplier_gcd = std::nullopt;
AffineExpr no_multiplier = zero_;
VisitSummands(sum, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (multiplier_gcd.has_value()) {
multiplier_gcd = std::gcd(*multiplier_gcd, *multiplier);
} else {
multiplier_gcd = *multiplier;
}
}
});
if (multiplier_gcd.value_or(1) == 1) {
return {zero_, 1, sum};
}
auto scaled = MapSummands(sum, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
return GetLhs(expr) * (*multiplier / *multiplier_gcd);
}
no_multiplier = no_multiplier + expr;
return zero_;
});
return {scaled, *multiplier_gcd, no_multiplier};
}
namespace {
struct UsedParameters {
llvm::DenseSet<int64_t> dimension_ids;
llvm::DenseSet<int64_t> symbol_ids;
};
void GetUsedParametersImpl(const AffineExpr& expr,
UsedParameters& used_parameters) {
if (auto dim_expr = mlir::dyn_cast<AffineDimExpr>(expr)) {
used_parameters.dimension_ids.insert(dim_expr.getPosition());
return;
}
if (auto symbol_expr = mlir::dyn_cast<AffineSymbolExpr>(expr)) {
used_parameters.symbol_ids.insert(symbol_expr.getPosition());
return;
}
if (auto binary_expr = mlir::dyn_cast<AffineBinaryOpExpr>(expr)) {
GetUsedParametersImpl(binary_expr.getLHS(), used_parameters);
GetUsedParametersImpl(binary_expr.getRHS(), used_parameters);
}
}
UsedParameters GetUsedParameters(const mlir::AffineExpr& expr) {
UsedParameters used_parameters;
GetUsedParametersImpl(expr, used_parameters);
return used_parameters;
}
bool IsFunctionOfUnusedVarsOnly(const UsedParameters& used_parameters,
const SmallBitVector& unused_dims_bit_vector,
const SmallBitVector& unused_symbols_bit_vector,
bool removing_dims, bool removing_symbols) {
if (!used_parameters.dimension_ids.empty() && !removing_dims) {
return false;
}
if (!used_parameters.symbol_ids.empty() && !removing_symbols) {
return false;
}
for (int64_t dim_id : used_parameters.dimension_ids) {
if (!unused_dims_bit_vector[dim_id]) return false;
}
for (int64_t symbol_id : used_parameters.symbol_ids) {
if (!unused_symbols_bit_vector[symbol_id]) return false;
}
return true;
}
struct UnusedVariables {
SmallBitVector unused_dims;
SmallBitVector unused_symbols;
SmallVector<AffineExpr> constraints_with_unused_vars_only;
};
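// Detects dimensions and symbols that are referenced neither by the affine
// map nor by any constraint that must be kept. Constraints that reference
// unused variables only are collected so that they can be dropped together
// with those variables.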
UnusedVariables DetectUnusedVariables(const IndexingMap& indexing_map,
bool removing_dims,
bool removing_symbols) {
AffineMap affine_map = indexing_map.GetAffineMap();
UnusedVariables unused_vars;
unused_vars.unused_dims = mlir::getUnusedDimsBitVector({affine_map});
unused_vars.unused_symbols = mlir::getUnusedSymbolsBitVector({affine_map});
SmallVector<std::pair<AffineExpr, UsedParameters>, 2>
unused_constraints_candidates;
for (const auto& [expr, range] : indexing_map.GetConstraints()) {
UsedParameters used_parameters = GetUsedParameters(expr);
if (IsFunctionOfUnusedVarsOnly(used_parameters, unused_vars.unused_dims,
unused_vars.unused_symbols, removing_dims,
removing_symbols)) {
unused_constraints_candidates.push_back({expr, used_parameters});
continue;
}
for (int64_t dim_id : used_parameters.dimension_ids) {
unused_vars.unused_dims[dim_id] = false;
}
for (int64_t symbol_id : used_parameters.symbol_ids) {
unused_vars.unused_symbols[symbol_id] = false;
}
}
for (const auto& [expr, used_parameters] : unused_constraints_candidates) {
if (IsFunctionOfUnusedVarsOnly(used_parameters, unused_vars.unused_dims,
unused_vars.unused_symbols, removing_dims,
removing_symbols)) {
unused_vars.constraints_with_unused_vars_only.push_back(expr);
}
}
return unused_vars;
}
SmallBitVector ConcatenateBitVectors(const SmallBitVector& lhs,
const SmallBitVector& rhs) {
SmallBitVector concat(lhs.size() + rhs.size(), false);
int id = 0;
for (int i = 0; i < lhs.size(); ++i, ++id) {
concat[id] = lhs[i];
}
for (int i = 0; i < rhs.size(); ++i, ++id) {
concat[id] = rhs[i];
}
return concat;
}
}
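// Removes the variables marked in `unused_dims` / `unused_symbols` from the
// affine map, the variable lists and the constraints, renumbering the
// remaining variables. Returns false if there was nothing to remove.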
bool IndexingMap::CompressVars(const llvm::SmallBitVector& unused_dims,
const llvm::SmallBitVector& unused_symbols) {
MLIRContext* mlir_context = GetMLIRContext();
bool num_dims_changed = unused_dims.count() > 0;
bool num_symbols_changed = unused_symbols.count() > 0;
if (!num_dims_changed && !num_symbols_changed) return false;
unsigned num_dims_before = GetDimensionCount();
unsigned num_symbols_before = GetSymbolCount();
SmallVector<AffineExpr, 2> dim_replacements;
if (num_dims_changed) {
affine_map_ = mlir::compressDims(affine_map_, unused_dims);
std::vector<IndexingMap::Variable> compressed_dim_vars;
dim_replacements = SmallVector<AffineExpr, 2>(
num_dims_before, getAffineConstantExpr(0, mlir_context));
int64_t used_dims_count = 0;
for (int i = 0; i < unused_dims.size(); ++i) {
if (!unused_dims[i]) {
compressed_dim_vars.push_back(dim_vars_[i]);
dim_replacements[i] = getAffineDimExpr(used_dims_count++, mlir_context);
}
}
dim_vars_ = std::move(compressed_dim_vars);
}
SmallVector<AffineExpr, 2> symbol_replacements;
if (num_symbols_changed) {
affine_map_ = mlir::compressSymbols(affine_map_, unused_symbols);
symbol_replacements = SmallVector<AffineExpr, 2>(
num_symbols_before, getAffineConstantExpr(0, mlir_context));
std::vector<IndexingMap::Variable> compressed_range_vars;
std::vector<IndexingMap::Variable> compressed_rt_vars;
int64_t used_symbols_count = 0;
auto range_vars_count = range_vars_.size();
for (int i = 0; i < unused_symbols.size(); ++i) {
if (!unused_symbols[i]) {
if (i < range_vars_count) {
compressed_range_vars.push_back(range_vars_[i]);
} else {
compressed_rt_vars.push_back(rt_vars_[i - range_vars_count]);
}
symbol_replacements[i] =
getAffineSymbolExpr(used_symbols_count++, mlir_context);
}
}
range_vars_ = std::move(compressed_range_vars);
rt_vars_ = std::move(compressed_rt_vars);
}
std::vector<AffineExpr> to_remove;
std::vector<std::pair<AffineExpr, Interval>> to_add;
for (const auto& [expr, range] : constraints_) {
auto updated_expr =
expr.replaceDimsAndSymbols(dim_replacements, symbol_replacements);
if (updated_expr == expr) continue;
to_add.push_back({updated_expr, range});
to_remove.push_back(expr);
}
for (const auto& expr : to_remove) {
constraints_.erase(expr);
}
for (const auto& [expr, range] : to_add) {
AddConstraint(expr, range);
}
return true;
}
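// Drops symbols (range and runtime variables) that are not referenced
// anywhere and returns a bit vector marking the removed symbols; the result
// is empty if nothing was removed.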
SmallBitVector IndexingMap::RemoveUnusedSymbols() {
if (IsUndefined()) return {};
if (GetSymbolCount() == 0) return {};
UnusedVariables unused_vars = DetectUnusedVariables(*this, /*removing_dims=*/false, /*removing_symbols=*/true);
for (AffineExpr expr : unused_vars.constraints_with_unused_vars_only) {
constraints_.erase(expr);
}
if (!CompressVars({}, unused_vars.unused_symbols)) {
return {};
}
return std::move(unused_vars.unused_symbols);
}
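// Canonicalizes the map to a known-empty form: all results become 0,
// dimension and range variable bounds become the empty interval [0, -1],
// and all constraints are cleared.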
void IndexingMap::ResetToKnownEmpty() {
auto zero = getAffineConstantExpr(0, GetMLIRContext());
affine_map_ = AffineMap::get(
affine_map_.getNumDims(), affine_map_.getNumSymbols(),
llvm::SmallVector<AffineExpr>(affine_map_.getNumResults(), zero),
GetMLIRContext());
for (auto& dim_var : dim_vars_) {
dim_var.bounds = Interval{0, -1};
}
for (auto& range_var : range_vars_) {
range_var.bounds = Interval{0, -1};
}
constraints_.clear();
is_known_empty_ = true;
}
bool IndexingMap::VerifyVariableIntervals() {
return llvm::all_of(dim_vars_,
[](const IndexingMap::Variable& dim_var) {
return dim_var.bounds.IsFeasible();
}) &&
llvm::all_of(range_vars_,
[](const IndexingMap::Variable& range_var) {
return range_var.bounds.IsFeasible();
}) &&
llvm::all_of(rt_vars_, [](const IndexingMap::Variable& rt_var) {
return rt_var.bounds.IsFeasible();
});
}
bool IndexingMap::VerifyConstraintIntervals() {
return llvm::all_of(constraints_, [](const auto& constraint) {
return constraint.second.IsFeasible();
});
}
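// Drops both dimensions and symbols that are not referenced anywhere and
// returns the concatenated (dims followed by symbols) bit vector of removed
// variables; the result is empty if nothing was removed.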
SmallBitVector IndexingMap::RemoveUnusedVars() {
if (IsUndefined()) return {};
UnusedVariables unused_vars = DetectUnusedVariables(*this, /*removing_dims=*/true, /*removing_symbols=*/true);
for (AffineExpr expr : unused_vars.constraints_with_unused_vars_only) {
constraints_.erase(expr);
}
if (!CompressVars(unused_vars.unused_dims, unused_vars.unused_symbols)) {
return {};
}
return ConcatenateBitVectors(unused_vars.unused_dims,
unused_vars.unused_symbols);
}
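// Merges point constraints of the form `expr mod c_i == r` on the same
// `expr` into a single `expr mod lcm(c_i) == r` constraint and tightens the
// bounds of the constrained dimension or symbol accordingly.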
bool IndexingMap::MergeModConstraints() {
RangeEvaluator range_evaluator(*this, GetMLIRContext(), /*use_constraints=*/false);
bool did_simplify = false;
llvm::DenseMap<AffineExpr, llvm::SmallVector<AffineBinaryOpExpr, 2>>
grouped_constraints;
for (const auto& [expr, _] : constraints_) {
if (expr.getKind() != AffineExprKind::Mod) continue;
auto binop = mlir::cast<AffineBinaryOpExpr>(expr);
grouped_constraints[binop.getLHS()].push_back(binop);
}
for (const auto& [lhs, binops] : grouped_constraints) {
llvm::DenseMap<int64_t, llvm::SmallVector<AffineBinaryOpExpr, 2>>
mod_groups;
for (const auto& binop : binops) {
Interval mod_result = constraints_[binop];
if (mod_result.IsPoint()) {
mod_groups[mod_result.lower].push_back(binop);
}
}
if (mod_groups.empty()) continue;
Interval* interval_to_update = nullptr;
if (lhs.getKind() == AffineExprKind::DimId) {
interval_to_update = &GetMutableDimensionBound(
mlir::cast<AffineDimExpr>(lhs).getPosition());
} else if (lhs.getKind() == AffineExprKind::SymbolId) {
interval_to_update = &GetMutableSymbolBound(
mlir::cast<AffineSymbolExpr>(lhs).getPosition());
}
for (const auto& [res, ops] : mod_groups) {
int64_t div = 1;
for (const auto& op : ops) {
int64_t rhs_value =
range_evaluator.ComputeExpressionRange(op.getRHS()).lower;
div = std::lcm(div, rhs_value);
}
if (ops.size() > 1) {
for (const auto& op : ops) {
constraints_.erase(op);
}
constraints_[lhs % div] = Interval{res, res};
did_simplify = true;
}
if (interval_to_update != nullptr) {
Interval old = *interval_to_update;
int64_t l = (interval_to_update->lower / div) * div + res;
interval_to_update->lower =
l >= interval_to_update->lower ? l : l + div;
int64_t h = (interval_to_update->upper / div) * div + res;
interval_to_update->upper =
h <= interval_to_update->upper ? h : h - div;
if (*interval_to_update != old) {
did_simplify = true;
}
}
}
}
return did_simplify;
}
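// Composes two indexing maps so that the result first applies `first` and
// then `second`, i.e. computes second(first(dims, symbols)). Range and
// runtime variables of both maps are concatenated, the constraints of both
// maps are rewritten in terms of the composed variable numbering, and the
// results of `first` are additionally constrained to the dimension bounds
// of `second`.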
IndexingMap ComposeIndexingMaps(const IndexingMap& first,
const IndexingMap& second) {
if (second.IsUndefined() || first.IsUndefined()) {
return IndexingMap::GetUndefined();
}
MLIRContext* mlir_context = first.GetMLIRContext();
AffineMap producer_affine_map = second.GetAffineMap();
AffineMap composed_map = producer_affine_map.compose(first.GetAffineMap());
std::vector<IndexingMap::Variable> combined_range_vars;
combined_range_vars.reserve(second.GetRangeVarsCount() +
first.GetRangeVarsCount());
for (const IndexingMap::Variable& range_var :
llvm::concat<const IndexingMap::Variable>(second.GetRangeVars(),
first.GetRangeVars())) {
combined_range_vars.push_back(range_var);
}
std::vector<IndexingMap::Variable> combined_rt_vars;
combined_rt_vars.reserve(second.GetRTVarsCount() + first.GetRTVarsCount());
for (const IndexingMap::Variable& rt_var :
llvm::concat<const IndexingMap::Variable>(second.GetRTVars(),
first.GetRTVars())) {
combined_rt_vars.push_back(rt_var);
}
SmallVector<AffineExpr, 4> symbol_replacements =
GetComposedSymbolsPermutationToCorrectOrder(first, second);
if (!symbol_replacements.empty()) {
composed_map = composed_map.replaceDimsAndSymbols(
{}, symbol_replacements, composed_map.getNumDims(),
composed_map.getNumSymbols());
}
IndexingMap composed_indexing_map(composed_map, first.GetDimVars(),
std::move(combined_range_vars),
std::move(combined_rt_vars));
std::vector<AffineExpr> constraints;
std::vector<Interval> constraints_ranges;
for (const auto& [expr, range] : second.GetConstraints()) {
constraints.push_back(expr);
constraints_ranges.push_back(range);
}
auto constraints_map = AffineMap::get(producer_affine_map.getNumDims(),
producer_affine_map.getNumSymbols(),
constraints, mlir_context);
auto remapped_constraints =
constraints_map.compose(first.GetAffineMap())
.replaceDimsAndSymbols({}, symbol_replacements,
composed_indexing_map.GetDimensionCount(),
composed_indexing_map.GetSymbolCount());
for (const auto& [expr, range] :
llvm::zip(remapped_constraints.getResults(), constraints_ranges)) {
composed_indexing_map.AddConstraint(expr, range);
}
SmallVector<AffineExpr, 4> first_map_symbols_to_composed_symbols =
MapSymbolsToComposedSymbolsList(first, composed_indexing_map);
for (const auto& [expr, range] : first.GetConstraints()) {
composed_indexing_map.AddConstraint(
expr.replaceSymbols(first_map_symbols_to_composed_symbols), range);
}
for (auto [index, expr] :
llvm::enumerate(first.GetAffineMap().getResults())) {
Interval producer_dim_range =
second.GetDimensionBound(static_cast<int64_t>(index));
composed_indexing_map.AddConstraint(
expr.replaceSymbols(first_map_symbols_to_composed_symbols),
producer_dim_range);
}
return composed_indexing_map;
}
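// For every point constraint `s mod c == shift` on a symbol `s`, substitutes
// `s -> s * c + shift` in the affine map and the remaining constraints and
// rescales the symbol's bounds, making the mod constraint redundant.
// Returns true if any symbol was rescaled.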
bool IndexingMap::RescaleSymbols() {
MergeModConstraints();
llvm::DenseSet<AffineExpr> to_delete;
llvm::DenseMap<AffineExpr, AffineExpr> to_replace;
for (const auto& [expr, range] : constraints_) {
if (range.lower != range.upper) continue;
auto shift_value = range.lower;
if (expr.getKind() != AffineExprKind::Mod) continue;
auto mod_expr = mlir::cast<AffineBinaryOpExpr>(expr);
auto constant_expr = mlir::dyn_cast<AffineConstantExpr>(mod_expr.getRHS());
if (!constant_expr) continue;
if (constant_expr.getValue() <= 0) continue;
auto scaling_factor = constant_expr.getValue();
if (mod_expr.getLHS().getKind() != AffineExprKind::SymbolId) continue;
auto symbol_expr = mlir::cast<AffineSymbolExpr>(mod_expr.getLHS());
if (to_replace.contains(symbol_expr)) {
continue;
}
to_replace[symbol_expr] = constant_expr * symbol_expr + shift_value;
to_delete.insert(expr);
affine_map_ = affine_map_.replace(
symbol_expr, constant_expr * symbol_expr + shift_value,
affine_map_.getNumDims(), affine_map_.getNumSymbols());
auto& symbol_range = range_vars_[symbol_expr.getPosition()].bounds;
symbol_range.lower = (symbol_range.lower - shift_value) / scaling_factor;
symbol_range.upper = (symbol_range.upper - shift_value) / scaling_factor;
}
llvm::DenseMap<mlir::AffineExpr, Interval> new_constraints;
for (const auto& [expr, range] : constraints_) {
if (!to_delete.contains(expr)) {
new_constraints[expr.replace(to_replace)] = range;
}
}
constraints_ = std::move(new_constraints);
return !to_delete.empty();
}
bool IndexingMap::IsRangeVarSymbol(mlir::AffineSymbolExpr symbol) const {
unsigned int position = symbol.getPosition();
CHECK_LE(position, GetSymbolCount());
return position < range_vars_.size();
}
bool IndexingMap::IsRTVarSymbol(mlir::AffineSymbolExpr symbol) const {
unsigned int position = symbol.getPosition();
CHECK_LE(position, GetSymbolCount());
return position >= range_vars_.size();
}
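// Returns a copy of this indexing map in which every range and runtime
// variable has been converted into an extra trailing dimension; constraints
// are rewritten accordingly.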
IndexingMap IndexingMap::ConvertSymbolsToDimensions() const {
int num_symbols = GetSymbolCount();
if (IsUndefined() || IsKnownEmpty() || num_symbols == 0) {
return *this;
}
int num_dims = GetDimensionCount();
MLIRContext* mlir_context = GetMLIRContext();
int64_t num_vars = num_dims + num_symbols;
std::vector<IndexingMap::Variable> new_dim_vars;
new_dim_vars.reserve(num_vars);
llvm::append_range(new_dim_vars, GetDimVars());
SmallVector<AffineExpr> syms_replacements;
int64_t symbol_id = num_dims;
for (const IndexingMap::Variable& var :
llvm::concat<const IndexingMap::Variable>(range_vars_, rt_vars_)) {
syms_replacements.push_back(getAffineDimExpr(symbol_id++, mlir_context));
new_dim_vars.push_back(IndexingMap::Variable{var.bounds});
}
SmallVector<std::pair<AffineExpr, Interval>, 4> new_constraints;
for (const auto& [expr, range] : constraints_) {
new_constraints.push_back(
std::make_pair(expr.replaceSymbols(syms_replacements), range));
}
AffineMap canonical_map =
affine_map_.replaceDimsAndSymbols({}, syms_replacements, num_vars, 0);
IndexingMap new_indexing_map(canonical_map, new_dim_vars, /*range_vars=*/{}, /*rt_vars=*/{}, new_constraints);
return new_indexing_map;
}
}
} | #include "xla/service/gpu/model/indexing_map.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineMap;
using ::testing::AnyOf;
using ::testing::ElementsAre;
class IndexingMapTest : public HloTestBase {
public:
IndexingMap Parse(absl::string_view indexing_map_str) {
auto indexing_map = ParseIndexingMap(indexing_map_str, &mlir_context_);
EXPECT_TRUE(indexing_map.has_value());
return *indexing_map;
}
mlir::MLIRContext mlir_context_;
};
std::vector<bool> ConvertToSTL(const llvm::SmallBitVector& bit_vector) {
std::vector<bool> result;
result.reserve(bit_vector.size());
for (int i = 0; i < bit_vector.size(); ++i) {
result.push_back(bit_vector[i]);
}
return result;
}
TEST_F(IndexingMapTest, VariableKind) {
EXPECT_EQ(ToVariableType("default"), VariableKind::kDefault);
EXPECT_EQ(ToVariableType("th_x"), VariableKind::kThreadX);
EXPECT_EQ(ToVariableType("th_y"), VariableKind::kThreadY);
EXPECT_EQ(ToVariableType("th_z"), VariableKind::kThreadZ);
EXPECT_EQ(ToVariableType("bl_x"), VariableKind::kBlockX);
EXPECT_EQ(ToVariableType("bl_y"), VariableKind::kBlockY);
EXPECT_EQ(ToVariableType("bl_z"), VariableKind::kBlockZ);
EXPECT_EQ(ToVariableType("warp"), VariableKind::kWarp);
EXPECT_EQ(ToVariableType("th_w"), VariableKind::kWarpThread);
EXPECT_EQ(ToVariableName(VariableKind::kDefault), "default");
EXPECT_EQ(ToVariableName(VariableKind::kThreadX), "th_x");
EXPECT_EQ(ToVariableName(VariableKind::kThreadY), "th_y");
EXPECT_EQ(ToVariableName(VariableKind::kThreadZ), "th_z");
EXPECT_EQ(ToVariableName(VariableKind::kBlockX), "bl_x");
EXPECT_EQ(ToVariableName(VariableKind::kBlockY), "bl_y");
EXPECT_EQ(ToVariableName(VariableKind::kBlockZ), "bl_z");
EXPECT_EQ(ToVariableName(VariableKind::kWarp), "warp");
EXPECT_EQ(ToVariableName(VariableKind::kWarpThread), "th_w");
}
TEST_F(IndexingMapTest, VerifyDimensions) {
auto indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_),
{10, 10}, {});
std::stringstream ss;
EXPECT_FALSE(indexing_map.Verify(ss));
EXPECT_EQ(ss.str(),
"dim size must match the number of dimensions in the affine map");
}
TEST_F(IndexingMapTest, VerifySymbols) {
auto indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_),
{10}, {10});
std::stringstream ss;
EXPECT_FALSE(indexing_map.Verify(ss));
EXPECT_EQ(ss.str(),
"range vars size + rt var size must match the number of symbols in "
"the affine map");
}
TEST_F(IndexingMapTest, RTVar) {
IndexingMap indexing_map(
ParseAffineMap("(d0, d1)[range, rt0, rt1] -> (d1, d0, range + rt0, rt1)",
&mlir_context_),
{IndexingMap::Variable{0, 99, "d0"}, IndexingMap::Variable{0, 43, "d1"}},
{IndexingMap::Variable{-99, 99, "range"}},
{IndexingMap::Variable{Interval{0, 2}},
IndexingMap::Variable({Interval{0, 7}})});
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1)[range, rt0, rt1] -> (d1, d0, range + rt0, rt1),
domain:
d0 in [0, 99],
d1 in [0, 43],
range in [-99, 99],
rt0 in [0, 2],
rt1 in [0, 7]
)"));
}
TEST_F(IndexingMapTest, Evaluation) {
IndexingMap indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 3],
s0 in [0, 1],
s1 in [0, 1]
)");
auto results = indexing_map.Evaluate(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_THAT(results, ElementsAre(2, 1, 4, 3));
auto feasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_TRUE(feasible);
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 4", &mlir_context_),
Interval{0, 0});
auto infeasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({5, 4}, &mlir_context_));
EXPECT_FALSE(infeasible);
}
TEST_F(IndexingMapTest, Composition_Permutation) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 3],
s0 in [0, 1],
s1 in [0, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 3],
s0 in [0, 3]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, Composition_RestrictedInterval) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 4],
d1 in [0, 5],
s0 in [0, 6],
s1 in [0, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 9],
s0 in [0, 7]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 4],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, Composition_ProducerAndConsumerHaveConstraints) {
IndexingMap producer = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1]
)");
IndexingMap consumer = Parse(R"(
(d0)[s0] -> (d0, s0),
domain:
d0 in [0, 9],
s0 in [0, 7],
d0 + s0 in [0, 20],
s0 mod 4 in [0, 0]
)");
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 9],
s0 in [0, 69],
s1 in [0, 19],
s2 in [0, 7],
d0 + s2 in [0, 20],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1],
s2 mod 4 in [0, 0]
)"));
EXPECT_TRUE(composed.Simplify());
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 8],
s0 in [1, 67],
s1 in [0, 19],
s2 in [0, 4],
d0 mod 8 in [0, 0],
s0 mod 3 in [1, 1],
s2 mod 4 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, Composition_RTVar) {
std::vector<IndexingMap::Variable> rt_vars{
IndexingMap::Variable{Interval{0, 0}},
IndexingMap::Variable({Interval{0, 1}}),
IndexingMap::Variable({Interval{0, 226}})};
IndexingMap producer(
ParseAffineMap(
"(d0, d1, d2)[rt0, rt1, rt2] -> (d0 + rt0, d1 + rt1, d2 + rt2)",
&mlir_context_),
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 226}}},
{}, std::move(rt_vars));
IndexingMap consumer(
ParseAffineMap("(d0, d1)[s] -> (0, d1, s)", &mlir_context_),
{IndexingMap::Variable{0, 0}, IndexingMap::Variable{0, 1}},
{IndexingMap::Variable{0, 31, "s"}}, {});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(ToString(composed), MatchIndexingString(R"(
(d0, d1)[s, rt0, rt1, rt2] -> (rt0, d1 + rt1, s + rt2),
domain:
d0 in [0, 0],
d1 in [0, 1],
s in [0, 31],
rt0 in [0, 0],
rt1 in [0, 1],
rt2 in [0, 226]
)"));
}
TEST_F(IndexingMapTest, Composition_OnlyRTVars) {
IndexingMap producer(
ParseAffineMap("(d0, d1)[s0, s1] -> (d0 + s0, d1 + 4 * s1)",
&mlir_context_),
{IndexingMap::Variable{0, 24}, IndexingMap::Variable{0, 15}}, {},
{IndexingMap::Variable{Interval{0, 2}, "ps_0"},
IndexingMap::Variable{Interval{0, 1}, "ps_1"}});
std::vector<IndexingMap::Variable> consumer_rt_vars;
IndexingMap consumer(
ParseAffineMap("(d0, d1)[s0, s1] -> (d0 + 2 * s0, d1 + 3 * s1)",
&mlir_context_),
{IndexingMap::Variable{0, 24}, IndexingMap::Variable{0, 15}}, {},
{IndexingMap::Variable{Interval{0, 25}, "cs_0"},
IndexingMap::Variable{Interval{0, 16}, "cs_1"}});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(ToString(composed), MatchIndexingString(R"(
(d0, d1)[ps_0, ps_1, cs_0, cs_1] ->
(d0 + cs_0 * 2 + ps_0, d1 + cs_1 * 3 + ps_1 * 4),
domain:
d0 in [0, 24],
d1 in [0, 15],
ps_0 in [0, 2],
ps_1 in [0, 1],
cs_0 in [0, 25],
cs_1 in [0, 16],
d0 + cs_0 * 2 in [0, 24],
d1 + cs_1 * 3 in [0, 15]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintUsesDim) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, s0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 + s0 in [1, 100],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, s0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 + s0 in [1, 100],
s0 mod 3 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintUsesUnusedDim) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (s0, d1, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
d0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (s0, d0, s1),
domain:
d0 in [0, 59],
s0 in [0, 69],
s1 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesOnlyUnusedSym) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d0, d1, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0] -> (d0, d1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintsWithManyDims) {
auto indexing_map = Parse(R"(
(d0, d1, d2, d3, d4)[s0, s1, s2] -> (s0 * 4 + d1 + d3 - 42),
domain:
d0 in [0, 0],
d1 in [0, 1],
d2 in [0, 2],
d3 in [0, 3],
d4 in [0, 4],
s0 in [0, 31],
s1 in [0, 63],
s2 in [0, 95],
s0 * 4 + d1 + d3 in [24, 459],
s0 + s2 in [0, 512]
)");
auto unused_vars = indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0 * 4 + d1 - 42),
domain:
d0 in [0, 1],
d1 in [0, 3],
s0 in [0, 31],
s1 in [0, 95],
d0 + s0 * 4 + d1 in [24, 459],
s0 + s1 in [0, 512]
)"));
EXPECT_THAT(ConvertToSTL(unused_vars),
::testing::ElementsAreArray(
{true, false, true, false, true, false, true, false}));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesSymbol) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 + s1 in [1, 100],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 + s1 in [1, 100],
s0 mod 3 in [0, 0]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesOnlyUnusedSymbols) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s0 mod 3 in [0, 0]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0] -> (d1, d0, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 19]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintIsAConstantWithinRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 49],
0 in [-10, 5]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 49]
)"));
}
TEST_F(IndexingMapTest, KnownEmpty_CreatingIndexingMapWithInfeasibleRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, -2]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_AddingConstraintOutOfRange) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 49],
0 in [10, 15]
)");
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_Composition) {
auto indexing_map = Parse("(d0) -> (d0), domain: d0 in [0, 49]");
auto known_empty = Parse("(d0) -> (d0), domain: d0 in [0, -1]");
EXPECT_THAT(known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(indexing_map * known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(known_empty * indexing_map, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_EQ((indexing_map * known_empty).GetAffineMap().getNumResults(), 1);
EXPECT_EQ((known_empty * indexing_map).GetAffineMap().getNumResults(), 1);
}
TEST_F(IndexingMapTest,
KnownEmpty_AddingConstraintOutOfRangeAfterSimplification) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 19],
s1 floordiv 20 in [2, 2]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithManySymbols) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42),
domain:
d0 in [0, 31],
s0 in [0, 0],
s1 in [0, 1],
s2 in [0, 2],
s3 in [0, 3],
s4 in [0, 4],
d0 * 4 + s1 + s3 in [24, 459]
)");
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (d0 * 4 + s0 + s1 - 42),
domain:
d0 in [0, 31],
s0 in [0, 1],
s1 in [0, 3],
d0 * 4 + s0 + s1 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithRTVars) {
IndexingMap indexing_map(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s1 + s3", &mlir_context_), Interval{24, 459});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, rt0] -> (d0 * 4 + s0 + rt0 - 42),
domain:
d0 in [0, 31],
s0 in [0, 1],
rt0 in [0, 3],
d0 * 4 + s0 + rt0 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, ConvertSymbolsToDimensions) {
IndexingMap indexing_map(
ParseAffineMap(
"(d0)[s0, s1, s2, s3] -> (d0 * 4 + s0 + s1 + 2 * s2 + 3 * s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s0 + 2 * s2", &mlir_context_),
Interval{24, 459});
EXPECT_THAT(indexing_map.ConvertSymbolsToDimensions(), MatchIndexingMap(R"(
(d0, d1, d2, d3, d4) -> (d0 * 4 + d1 + d2 + d3 * 2 + d4 * 3 - 42),
domain:
d0 in [0, 31],
d1 in [0, 0],
d2 in [0, 1],
d3 in [0, 3],
d4 in [0, 4],
d0 * 4 + d1 + d3 * 2 in [24, 459]
)"));
}
TEST_F(IndexingMapTest, ConstraintIntervalSimplification_Sum) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 mod 8 + 5 in [50, 54]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 mod 8 in [45, 49]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_Sum_IndependentOfSymbol) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 1999],
s0 in [0, 1],
s1 in [0, 2],
d0 * 6 + s0 * 3 + s1 in [0, 599]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 99],
s0 in [0, 1],
s1 in [0, 2]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_Sum_NotIndependentOfSymbol) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0 * 6 + s0 * 3 + s1),
domain:
d0 in [0, 1999],
s0 in [0, 1],
s1 in [0, 2],
d0 * 6 + s0 * 3 + s1 in [0, 598]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, ConstraintIntervalSimplification_Sum_GcdGreaterOne) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 * 6 + s0 * 3),
domain:
d0 in [0, 1999],
s0 in [0, 1],
d0 * 6 + s0 * 3 in [0, 599]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 * 6 + s0 * 3),
domain:
d0 in [0, 99],
s0 in [0, 1]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorPositiveBounds) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 floordiv 8 in [5, 11]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [40, 95]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 floordiv 3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-33, -13]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivNegativeDivisorNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 floordiv -3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [15, 35]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierPositiveBounds) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [0, 99],
d0 * 8 in [14, 33]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0),
domain:
d0 in [2, 4]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 * 3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-3, -2]
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulNegativeMultiplierNegativeBounds) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [-99, 99],
s0 * -3 in [-11, -5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0),
domain:
d0 in [0, 99],
s0 in [2, 3]
)"));
}
TEST_F(IndexingMapTest, ConstraintMerge_Mod) {
auto indexing_map = Parse(R"(
(d0)[s0, s1] -> (d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [-21, -2],
s1 in [0, 10],
d0 mod 3 in [0, 0],
s0 mod 2 in [0, 0],
s0 mod 3 in [0, 0],
s1 mod 5 in [1, 1]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1] -> (d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [-18, -6],
s1 in [1, 6],
d0 mod 3 in [0, 0],
s0 mod 6 in [0, 0],
s1 mod 5 in [1, 1]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ConstantDims) {
auto indexing_map = Parse(R"(
(d0) -> (d0),
domain:
d0 in [5, 5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (5),
domain:
d0 in [5, 5]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SumOrderRegression) {
auto indexing_map = Parse(R"(
(d0, d1)[s0, s1] -> (((((d0 + (d0 mod 3)) floordiv 3)
+ (s0 + ((s0 + s0) mod 3))) + (((d0 + s0) mod 3) + 0))),
domain:
d0 in [0, 9],
d1 in [0, 19],
s0 in [0, 29],
s1 in [0, 39]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_SumOrderRegression2) {
auto indexing_map = Parse(R"(
(d0)[s0] -> ((((s0 + d0) + d0) floordiv 2)),
domain:
d0 in [0, 9],
s0 in [0, 19]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_FloorDivRegression) {
auto indexing_map = Parse(R"(
(d0, d1) -> (((d0 floordiv 3) * 3 + d1 floordiv 2) floordiv 6),
domain:
d0 in [0, 11],
d1 in [0, 5]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 floordiv 6),
domain:
d0 in [0, 11],
d1 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsSub) {
auto indexing_map = Parse(R"(
(d0) -> (d0 mod 42),
domain:
d0 in [53, 71]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0 - 42),
domain:
d0 in [53, 71]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsAdd) {
auto indexing_map = Parse(R"(
(d0) -> (d0 mod 5),
domain:
d0 in [-5, -1]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> (d0 + 5),
domain:
d0 in [-5, -1]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ModIsNotAdd) {
auto indexing_map1 = Parse("(d0) -> (d0 mod 5), domain: d0 in [-4, 0]");
EXPECT_FALSE(indexing_map1.Simplify());
auto indexing_map2 = Parse("(d0) -> (d0 mod 5), domain: d0 in [-6, -1]");
EXPECT_FALSE(indexing_map2.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsMod) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 - (s0 floordiv 3) * 3 + s0),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + s0 mod 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsModMultiplied) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (d0 - (s0 floordiv 3) * 12 + s0 * 7),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + (s0 mod 3) * 4 + s0 * 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SubIsModSum) {
auto indexing_map = Parse(R"(
(d0)[s0] -> (1 + d0 - ((s0 + 1) floordiv 3) * 3 + s0),
domain:
d0 in [0, 1],
s0 in [0, 3]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0] -> (d0 + (s0 + 1) mod 3),
domain:
d0 in [0, 1],
s0 in [0, 3]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsIfSmallerThanDivisor) {
auto indexing_map = Parse(R"(
(d0, d1) -> (d0 + d1 floordiv 16, d1 mod 16),
domain:
d0 in [0, 7],
d1 in [0, 15]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 15]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithMultipliers) {
auto indexing_map = Parse(R"(
(d0, d1, d2) -> ((d0 * 100 + d1 * 10 + d2) floordiv 100,
((d0 * 100 + d1 * 10 + d2) mod 100) floordiv 10,
d2 mod 10),
domain:
d0 in [0, 8],
d1 in [0, 8],
d2 in [0, 8]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 8],
d1 in [0, 8],
d2 in [0, 8]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsWithDivisibleMultipliers) {
auto indexing_map = Parse(R"(
(d0, d1, d2) -> ((d0 * 16 + d1 * 4 + d2) floordiv 8,
(d0 * 16 + d1 * 4 + d2) mod 8),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1, d2) -> (d0 * 2 + (d1 * 4 + d2) floordiv 8,
(d1 * 4 + d2) mod 8),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithReverse) {
auto indexing_map = Parse(R"(
(d0, d1) -> (-((d0 * -11 - d1 + 109) floordiv 11) + 9,
d0 * 11 + d1 + ((d0 * -11 - d1 + 109) floordiv 11) * 11 - 99),
domain:
d0 in [0, 7],
d1 in [0, 8]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 8]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 128) mod 715 + ((s0 * 128) floordiv 715) * 715),
domain:
s0 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (s0 * 128),
domain:
s0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape2) {
auto indexing_map = Parse(R"(
(d0, d1) -> ((d0 mod 8) * 128 + d1 + (d0 floordiv 8) * 1024),
domain:
d0 in [0, 1023],
d1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 128 + d1),
domain:
d0 in [0, 1023],
d1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape3) {
auto indexing_map = Parse(R"(
(d0, d1) -> (((d1 * 2 + d0 floordiv 64) mod 3) * 256 + (d0 mod 64) * 4
+ ((d1 * 128 + d0) floordiv 192) * 768),
domain:
d0 in [0, 127],
d1 in [0, 3071]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 4 + d1 * 512),
domain:
d0 in [0, 127],
d1 in [0, 3071]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_ModWithNegativeMultiplerDoesNotGetSimplified) {
auto indexing_map = Parse(R"(
(d0) -> ((-d0) mod 2),
domain:
d0 in [0, 127]
)");
EXPECT_FALSE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0) -> ((-d0) mod 2),
domain:
d0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyBitcastAndBack) {
auto indexing_map = Parse(R"(
(d0, d1) -> ((d0 floordiv 1536) * 786432
+ (((d0 * 2 + d1 floordiv 64) floordiv 3) mod 1024) * 768
+ ((d0 * 2 + d1 floordiv 64) mod 3) * 256 + (d1 mod 64) * 4),
domain:
d0 in [0, 3071],
d1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0, d1) -> (d0 * 512 + d1 * 4),
domain:
d0 in [0, 3071],
d1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape_Regression) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 128) mod 715 + ((s0 * 64) floordiv 715) * 715),
domain:
s0 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (((s0 * 64) floordiv 715) * 715 + (s0 * 128) mod 715),
domain:
s0 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsInSequence) {
auto indexing_map = Parse(R"(
()[s0] -> (s0 - ((s0 floordiv 2) floordiv 7) * 14 + (s0 floordiv 14) * 14),
domain:
s0 in [0, 1233]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> (s0),
domain:
s0 in [0, 1233]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivDiv) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 * 2 + s1 floordiv 64) floordiv 3),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1] -> ((s0 * 128 + s1) floordiv 192),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivSumConstant) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 * 6 + 9) floordiv 18),
domain:
s0 in [0, 1233]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0] -> ((s0 * 2 + 3) floordiv 6),
domain:
s0 in [0, 1233]
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivSumDiv) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 floordiv 3 + s1 floordiv 3) floordiv 6),
domain:
s0 in [0, 1233],
s1 in [0, 127]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_NegativeDiv) {
auto indexing_map = Parse(R"(
()[s0] -> ((s0 floordiv 2) floordiv -7),
domain:
s0 in [0, 1233]
)");
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_ExtractFromMod) {
auto indexing_map = Parse(R"(
()[s0, s1, s2, s3] -> ((s0 * 458752 + s1 + s2 * 4 + s3 * 512) mod 20000),
domain:
s0 in [0, 871],
s1 in [0, 3],
s2 in [0, 127],
s3 in [0, 895]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1, s2, s3] -> (
((s0 * 114688 + s3 * 128 + s2) mod 5000) * 4 + s1
),
domain:
s0 in [0, 871],
s1 in [0, 3],
s2 in [0, 127],
s3 in [0, 895]
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_ExtractFromDiv_NegativeMultiplier) {
auto indexing_map = Parse(R"(
()[s0, s1] -> ((s0 * 16 - (s1 floordiv 4) floordiv 2 + (s1 floordiv 8) * 2)
floordiv 4),
domain:
s0 in [0, 1],
s1 in [0, 127]
)");
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
()[s0, s1] -> (
s0 * 4 + s1 floordiv 32
),
domain:
s0 in [0, 1],
s1 in [0, 127]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_Simple) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [0, 0]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_WithShift) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 41],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [3, 3]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 * 6 + 3),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_TwoModConstraints) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 7],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 2 in [0, 0],
s0 mod 3 in [0, 0]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5]
)"));
}
TEST_F(IndexingMapTest, RescaleSymbols_RescaledSymbolInOtherNonModConstraint) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 9],
s1 in [0, 1],
s2 in [0, 5],
s0 * s2 in [0, 28],
s0 mod 6 in [3, 3]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
EXPECT_THAT(ToString(indexing_map), MatchIndexingString(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0 * 6 + 3),
domain:
d0 in [0, 3],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 5],
(s0 * 6 + 3) * s2 in [0, 28]
)"));
}
TEST_F(IndexingMapTest,
RescaleSymbols_TwoModConstraintsForTheSameSymbolWhichCannotBeMerged) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0),
domain:
d0 in [0, 3],
s0 in [0, 99],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [3, 3],
s0 mod 7 in [5, 5]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
const mlir::AffineExpr result3 = indexing_map.GetAffineMap().getResult(3);
ASSERT_THAT(indexing_map.GetConstraints(), ::testing::SizeIs(1));
const mlir::AffineExpr constraint_expr =
indexing_map.GetConstraints().begin()->first;
const Interval constraint_interval =
indexing_map.GetConstraints().begin()->second;
EXPECT_THAT(
std::make_tuple(result3, constraint_expr, constraint_interval),
AnyOf(
std::make_tuple(ParseAffineExpr("s0 * 6 + 3", &mlir_context_),
ParseAffineExpr("(s0 * 6 + 3) mod 7", &mlir_context_),
Interval{5, 5}),
std::make_tuple(ParseAffineExpr("s0 * 7 + 5", &mlir_context_),
ParseAffineExpr("(s0 * 7 + 5) mod 6", &mlir_context_),
Interval{3, 3})));
}
TEST_F(IndexingMapTest, RescaleSymbolsKeepsHashmapConsistent) {
auto indexing_map = Parse(R"(
(d0)[s0, s1, s2] -> (s2, d0, s0, s0 floordiv 6),
domain:
d0 in [0, 3],
s0 in [0, 6],
s1 in [0, 1],
s2 in [0, 5],
s0 mod 6 in [0, 0],
s0 * s1 in [0, 100]
)");
EXPECT_TRUE(indexing_map.RescaleSymbols());
for (auto& [expr, interval] : indexing_map.GetConstraints()) {
EXPECT_TRUE(indexing_map.GetConstraints().contains(expr))
<< "Don't modify the *keys* of the hashmap.";
}
}
TEST_F(IndexingMapTest, RangeEvaluatorTest) {
auto indexing_map = Parse(R"(
(d0, d1, d2, d3)[] -> (0),
domain:
d0 in [0, 9],
d1 in [-10, -1],
d2 in [-1, 2],
d3 in [0, 0]
)");
RangeEvaluator range_evaluator(indexing_map, &mlir_context_);
mlir::AffineExpr d0, d1, d2, d3;
bindDims(&mlir_context_, d0, d1, d2, d3);
EXPECT_TRUE(range_evaluator.IsAlwaysPositiveOrZero(d0));
EXPECT_FALSE(range_evaluator.IsAlwaysNegativeOrZero(d0));
EXPECT_FALSE(range_evaluator.IsAlwaysPositiveOrZero(d1));
EXPECT_TRUE(range_evaluator.IsAlwaysNegativeOrZero(d1));
EXPECT_FALSE(range_evaluator.IsAlwaysPositiveOrZero(d2));
EXPECT_FALSE(range_evaluator.IsAlwaysNegativeOrZero(d2));
EXPECT_TRUE(range_evaluator.IsAlwaysPositiveOrZero(d3));
EXPECT_TRUE(range_evaluator.IsAlwaysNegativeOrZero(d3));
}
TEST(IntervalComparisonTest, PointComparisons) {
Interval interval{12, 64};
auto point = [](int64_t n) { return Interval{n, n}; };
EXPECT_EQ(interval.Gt(point(11)), true);
EXPECT_EQ(interval.Gt(point(12)), std::nullopt);
EXPECT_EQ(interval.Gt(point(65)), false);
EXPECT_EQ(interval.Lt(point(65)), true);
EXPECT_EQ(interval.Lt(point(64)), std::nullopt);
EXPECT_EQ(interval.Lt(point(10)), false);
EXPECT_EQ(interval.Eq(point(11)), false);
EXPECT_EQ(interval.Eq(point(12)), std::nullopt);
EXPECT_EQ(interval.Eq(point(15)), std::nullopt);
EXPECT_EQ(interval.Eq(point(65)), false);
EXPECT_EQ(interval.Ne(point(11)), true);
EXPECT_EQ(interval.Ne(point(15)), std::nullopt);
EXPECT_EQ(interval.Ne(point(65)), true);
EXPECT_EQ(interval.Ge(point(12)), true);
EXPECT_EQ(interval.Ge(point(64)), std::nullopt);
EXPECT_EQ(interval.Ge(point(65)), false);
EXPECT_EQ(interval.Le(point(11)), false);
EXPECT_EQ(interval.Le(point(64)), true);
EXPECT_EQ(interval.Le(point(63)), std::nullopt);
EXPECT_EQ(interval.Le(point(65)), true);
EXPECT_EQ(point(15).Eq(point(15)), true);
EXPECT_EQ(point(15).Eq(point(16)), false);
EXPECT_EQ(point(15).Ne(point(15)), false);
EXPECT_EQ(point(15).Ne(point(16)), true);
}
TEST(IntervalComparisonTest, RangeComparisons) {
Interval interval{12, 64};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_EQ(interval.Gt(range(-10, 11)), true);
EXPECT_EQ(interval.Gt(range(-10, 12)), std::nullopt);
EXPECT_EQ(interval.Gt(interval), std::nullopt);
EXPECT_EQ(interval.Gt(range(10, 20)), std::nullopt);
EXPECT_EQ(interval.Gt(range(50, 60)), std::nullopt);
EXPECT_EQ(interval.Gt(range(64, 100)), false);
EXPECT_EQ(interval.Gt(range(65, 100)), false);
EXPECT_EQ(interval.Lt(range(65, 100)), true);
EXPECT_EQ(interval.Lt(range(64, 100)), std::nullopt);
EXPECT_EQ(interval.Lt(interval), std::nullopt);
EXPECT_EQ(interval.Lt(range(50, 60)), std::nullopt);
EXPECT_EQ(interval.Lt(range(10, 20)), std::nullopt);
EXPECT_EQ(interval.Lt(range(-10, 12)), false);
EXPECT_EQ(interval.Lt(range(-10, 11)), false);
EXPECT_EQ(interval.Eq(interval), std::nullopt);
EXPECT_EQ(interval.Eq(range(65, 100)), false);
EXPECT_EQ(interval.Eq(range(0, 11)), false);
}
MATCHER_P(IntervalIs, interval, "") {
std::pair<int64_t, int64_t> arg_pair{arg.lower, arg.upper};
return ::testing::ExplainMatchResult(
::testing::Pair(interval.lower, interval.upper), arg_pair,
result_listener);
}
TEST(IntervalMathTest, Addition) {
Interval a{12, 64};
Interval b{-100, 120};
Interval sum{12 - 100, 64 + 120};
EXPECT_THAT(a + b, IntervalIs(sum));
}
TEST(IntervalMathTest, AdditionSaturating) {
Interval a{12, 64};
Interval b{-100, 120};
Interval c{100, std::numeric_limits<int64_t>::max() - 80};
Interval any{std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max()};
Interval positive{0, std::numeric_limits<int64_t>::max()};
Interval negative{std::numeric_limits<int64_t>::min(), 0};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_THAT(positive + negative, IntervalIs(any));
EXPECT_THAT(any + any, IntervalIs(any));
EXPECT_THAT(b + any, IntervalIs(any));
EXPECT_THAT(c + any, IntervalIs(any));
EXPECT_THAT(c + positive,
IntervalIs(range(100, std::numeric_limits<int64_t>::max())));
Interval c_plus_negative{negative.lower, c.upper};
EXPECT_THAT(c + negative, IntervalIs(c_plus_negative));
Interval a_plus_c{112, std::numeric_limits<int64_t>::max() - 16};
EXPECT_THAT(a + c, IntervalIs(a_plus_c));
Interval b_plus_c{0, std::numeric_limits<int64_t>::max()};
EXPECT_THAT(b + c, IntervalIs(b_plus_c));
}
TEST(IntervalMathTest, Multiplication) {
Interval pos{10, 100};
Interval neg{-10, -1};
Interval both_small{-5, 6};
Interval both_large{-20, 1000};
auto range = [](int64_t l, int64_t u) { return Interval{l, u}; };
EXPECT_THAT(pos * neg, IntervalIs(range(-1000, -10)));
EXPECT_THAT(pos * both_small, IntervalIs(range(-500, 600)));
EXPECT_THAT(pos * both_large, IntervalIs(range(-2000, 100000)));
EXPECT_THAT(neg * both_small, IntervalIs(range(-60, 50)));
EXPECT_THAT(neg * both_large, IntervalIs(range(-10000, 200)));
EXPECT_THAT(both_small * both_large, IntervalIs(range(-5000, 6000)));
}
TEST(IntervalMathTest, MultiplicationSaturating) {
Interval any{std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max()};
Interval bit33{42, std::numeric_limits<uint32_t>::max()};
Interval bit33_sq{42 * 42, std::numeric_limits<int64_t>::max()};
EXPECT_THAT(bit33 * bit33, IntervalIs(bit33_sq));
EXPECT_THAT(any * any, IntervalIs(any));
Interval greater_41{42, std::numeric_limits<int64_t>::max()};
Interval neg_one{-1, -1};
Interval less_neg_41{std::numeric_limits<int64_t>::min(), -42};
EXPECT_THAT(greater_41 * neg_one, IntervalIs(less_neg_41));
EXPECT_THAT(less_neg_41 * neg_one, IntervalIs(greater_41));
EXPECT_THAT(any * neg_one, IntervalIs(any));
}
template <typename T>
void ExpectSupportsAbslHashAndEqAndNe(absl::Span<const T> values) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(values));
for (const T& a : values) {
for (const T& b : values) {
EXPECT_EQ(a != b, !(a == b));
}
}
}
TEST_F(IndexingMapTest, IntervalSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<Interval>(
{Interval{1, 1}, Interval{0, 1}, Interval{1, 2}});
}
TEST_F(IndexingMapTest, IntervalSupportsLlvmStyleHashingAndEqAndNe) {
auto check_consistent = [](const Interval& a, const Interval& b) {
if (a == b) {
EXPECT_EQ(hash_value(a), hash_value(b));
}
if (hash_value(a) != hash_value(b)) {
EXPECT_NE(a, b);
}
EXPECT_EQ(a != b, !(a == b));
};
std::vector<Interval> intervals = {Interval{1, 1}, Interval{0, 1},
Interval{1, 2}};
for (const auto& a : intervals) {
for (const auto& b : intervals) {
check_consistent(a, b);
}
}
}
TEST_F(IndexingMapTest, DimVarSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{1, 1}, IndexingMap::Variable{0, 1},
IndexingMap::Variable{1, 2}});
}
TEST_F(IndexingMapTest, RangeVarSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{1, 1}, IndexingMap::Variable{0, 1},
IndexingMap::Variable{1, 2}});
}
TEST_F(IndexingMapTest, RTVarSupportsAbslHashAndEqAndNe) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> hlo_module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
ROOT %constant = s64[] constant(42)
}
)"));
ASSERT_NE(hlo_module, nullptr);
ExpectSupportsAbslHashAndEqAndNe<IndexingMap::Variable>(
{IndexingMap::Variable{Interval{1, 1}},
IndexingMap::Variable{Interval{1, 2}},
IndexingMap::Variable{Interval{1, 2}},
IndexingMap::Variable{Interval{1, 2}}});
}
TEST_F(IndexingMapTest, IndexingMapSupportsAbslHashAndEqAndNe) {
ExpectSupportsAbslHashAndEqAndNe<IndexingMap>(
{Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1 * 2, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 50],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79],
d0 mod 8 in [0, 0],
d0 mod 16 in [0, 0]
)"),
Parse(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1, s0),
domain:
d0 in [0, 49],
d1 in [0, 59],
s0 in [0, 69],
s1 in [0, 79],
d0 mod 8 in [0, 0],
d0 mod 32 in [0, 0]
)"),
IndexingMap(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 4}}}),
IndexingMap(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{IndexingMap::Variable{{0, 31}}},
{IndexingMap::Variable{{0, 0}}, IndexingMap::Variable{{0, 1}},
IndexingMap::Variable{{0, 2}}},
{IndexingMap::Variable{Interval{0, 3}},
IndexingMap::Variable{Interval{0, 5}}})});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43da607d-bf25-4e1f-836a-cb8c6ddb47bb | cpp | tensorflow/tensorflow | symbolic_tiled_hlo_instruction | third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction.cc | third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction_test.cc | #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <sstream>
#include <string>
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/symbolic_tile.h"
namespace xla {
namespace gpu {
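// The helpers below evaluate the symbolic tile's offset, size and stride
// maps at the given concrete tile parameters.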
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileOffsets(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().offset_map(),
tile_parameters);
}
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileSizes(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().size_map(),
tile_parameters);
}
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileStrides(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().stride_map(),
tile_parameters);
}
std::string SymbolicTiledHloInstruction::ToString() const {
std::stringstream ss;
ss << "\thlo: " << hlo_->ToString() << "\n";
ss << "\t" << symbolic_tile().ToString() << "\n";
ss << "\tindexing map: " << indexing_map_ << "\n";
return ss.str();
}
}
} | #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using SymbolicTiledHloInstructionTest = HloTestBase;
TEST_F(SymbolicTiledHloInstructionTest, TransposeTileSizesAreSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
fused_computation {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
transpose = f32[32,16] transpose(p0), dimensions={1,0}
ROOT subtract = f32[32,16] subtract(transpose, p1)
}
ENTRY main {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
ROOT root = f32[32,16] fusion(p0, p1), kind=kLoop, calls=fused_computation
}
)"));
mlir::MLIRContext mlir_ctx;
auto fusion = module->entry_computation()->root_instruction();
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
auto output_to_input_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_ctx);
HloInstruction* subtract = fusion->fused_expression_root();
HloInstruction* p0 = subtract->mutable_operand(0)->mutable_operand(0);
HloInstruction* p1 = subtract->mutable_operand(1);
IndexingMap p0_indexing =
*output_to_input_indexing[fusion->operand(0)].begin();
std::optional<SymbolicTile> p0_symbolic_tile =
SymbolicTile::FromIndexingMap(p0_indexing);
ASSERT_TRUE(p0_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p0(p0, p0_indexing);
tiled_p0.set_symbolic_tile(*p0_symbolic_tile);
ASSERT_TRUE(p0_symbolic_tile.has_value());
IndexingMap p1_indexing =
*output_to_input_indexing[fusion->operand(1)].begin();
std::optional<SymbolicTile> p1_symbolic_tile =
SymbolicTile::FromIndexingMap(p1_indexing);
ASSERT_TRUE(p1_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p1(p1, p1_indexing);
tiled_p1.set_symbolic_tile(*p1_symbolic_tile);
std::vector<int64_t> output_tile_sizes = {8, 4};
auto p0_tile_sizes = tiled_p0.TileSizes(output_tile_sizes);
EXPECT_THAT(tiled_p0.TileSizes(output_tile_sizes), ElementsAre(4, 8));
EXPECT_THAT(tiled_p1.TileSizes(output_tile_sizes), ElementsAre(8, 4));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
935f64c6-1b0e-49e5-a889-d0eafb7ff84e | cpp | tensorflow/tensorflow | autotuner_compile_util | third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util.cc | third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util_test.cc | #include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
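// Wraps raw device buffers into ExecutionInputs without taking ownership, so
// the same buffers can be reused across the warmup and measured runs.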
std::vector<ExecutionInput> ExecutionInputsFromBuffers(
absl::Span<se::DeviceMemoryBase const> buffers,
absl::Span<Shape const> shapes) {
CHECK_EQ(buffers.size(), shapes.size());
std::vector<ExecutionInput> inputs;
for (int i = 0; i < buffers.size(); ++i) {
inputs.emplace_back(shapes.at(i));
inputs.back().SetUnownedBuffer(
{}, MaybeOwningDeviceMemory(buffers.at(i)));
}
return inputs;
}
}
AutotunerCompileUtil::AutotunerCompileUtil(const AutotuneConfig& config,
Compiler* compiler,
se::StreamExecutor& stream_executor,
se::Stream& stream,
se::DeviceMemoryAllocator& allocator,
const DebugOptions& opts)
: config_(config),
compiler_(compiler),
stream_executor_(stream_executor),
stream_(stream),
allocator_(allocator),
opts_(opts) {
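  // Candidate compilations run with dumping, autotune-result persistence,
  // compilation parallelism, command buffers, and kernel caching disabled.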
opts_.set_xla_enable_dumping(false);
opts_.set_xla_gpu_dump_autotune_results_to("");
opts_.set_xla_gpu_load_autotune_results_from("");
opts_.set_xla_gpu_dump_llvmir(false);
opts_.set_xla_gpu_dump_autotune_logs_to("");
opts_.set_xla_gpu_force_compilation_parallelism(1);
opts_.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
opts_.clear_xla_gpu_enable_command_buffer();
opts_.set_xla_gpu_async_dot(false);
opts_.set_xla_embed_ir_in_executable(false);
opts_.set_xla_gpu_kernel_cache_file("");
}
absl::StatusOr<std::optional<AutotunerCompileUtil::ProfilingOutput>>
AutotunerCompileUtil::ProfileExecutable(
Executable* executable, se::Stream* stream,
absl::Span<se::DeviceMemoryBase const> input_buffers,
absl::Span<Shape const> input_shapes) {
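  // The first execution is a warmup; running out of memory here is reported as
  // std::nullopt so the candidate is skipped instead of failing autotuning.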
{
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
absl::StatusOr<ExecutionOutput> execution_output =
Execute(*executable, std::move(execution_inputs));
if (!execution_output.ok()) {
if (execution_output.status().code() ==
absl::StatusCode::kResourceExhausted) {
return {std::nullopt};
}
return execution_output.status();
}
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
}
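  // The second execution is measured; the ExecutionProfile's compute time
  // becomes the candidate's reported duration.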
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
ExecutionProfile profile;
profile.set_warmup_run_executed(true);
TF_ASSIGN_OR_RETURN(
ExecutionOutput execution_output,
Execute(*executable, std::move(execution_inputs), &profile));
return std::make_optional<ProfilingOutput>(
absl::Nanoseconds(profile.compute_time_ns()),
execution_output.Commit().ConsumeResult());
}
absl::StatusOr<std::unique_ptr<Executable>> AutotunerCompileUtil::Compile(
GenerateModuleFn extractor) {
absl::StatusOr<std::unique_ptr<HloModule>> new_hlo_module = extractor(opts_);
if (new_hlo_module.status().GetPayload(kUncompilableFusion).has_value()) {
return std::unique_ptr<Executable>();
} else if (!new_hlo_module.status().ok()) {
return new_hlo_module.status();
}
absl::StatusOr<std::unique_ptr<Executable>> out = compiler_->RunBackend(
std::move(*new_hlo_module), &stream_executor_,
Compiler::CompileOptions{&allocator_, nullptr,
{},
true});
if (out.status().code() == absl::StatusCode::kResourceExhausted ||
out.status().code() == absl::StatusCode::kCancelled) {
return std::unique_ptr<Executable>();
}
return out;
}
absl::StatusOr<std::unique_ptr<HloModule>> AutotunerCompileUtil::ExtractModule(
GenerateModuleFn extractor) {
return extractor(opts_);
}
absl::StatusOr<std::optional<AutotunerCompileUtil>>
AutotunerCompileUtil::Create(const AutotuneConfig& config,
const DebugOptions& opts) {
if (config.IsDeviceless()) {
return std::nullopt;
}
se::StreamExecutor* stream_exec = config.GetExecutor();
se::DeviceMemoryAllocator* allocator = config.GetAllocator();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config.GetStream());
TF_ASSIGN_OR_RETURN(Compiler * compiler,
Compiler::GetForPlatform(stream_exec->GetPlatform()));
return AutotunerCompileUtil(config, compiler, *stream_exec, *stream,
*allocator, opts);
}
absl::StatusOr<ExecutionOutput> AutotunerCompileUtil::Execute(
Executable& executable, std::vector<ExecutionInput> arguments,
ExecutionProfile* profile) {
GpuExecutableRunOptions gpu_opts;
gpu_opts.set_requires_exclusive_lock_on_gpu();
ExecutableRunOptions run_options;
run_options.set_device_ordinal(stream_executor_.device_ordinal());
run_options.set_stream(&stream_);
run_options.set_allocator(&allocator_);
run_options.set_gpu_executable_run_options(&gpu_opts);
run_options.set_execution_profile(profile);
ServiceExecutableRunOptions service_run_options(run_options);
TF_ASSIGN_OR_RETURN(ExecutionOutput output,
executable.ExecuteAsyncOnStreamWrapper(
&service_run_options, std::move(arguments)));
return std::move(output);
}
absl::StatusOr<RedzoneBuffers> RedzoneBuffers::FromInstruction(
const HloInstruction& instruction, const AutotuneConfig& config,
const DebugOptions& debug_options, BuffersToCreate buffers_to_create) {
RedzoneBuffers buffers;
TF_ASSIGN_OR_RETURN(auto rz_allocator, AutotunerUtil::CreateRedzoneAllocator(
config, debug_options));
buffers.redzone_allocator_ =
std::make_unique<se::RedzoneAllocator>(std::move(rz_allocator));
int64_t rng_state = 0;
TF_RETURN_IF_ERROR(
buffers.CreateInputs(instruction, config, debug_options, rng_state));
if (buffers_to_create == BuffersToCreate::kAllInputsAllOutputs ||
buffers_to_create == BuffersToCreate::kAllInputsOutputsNoScratch) {
TF_RETURN_IF_ERROR(buffers.CreateOutputs(instruction, config, debug_options,
buffers_to_create, rng_state));
}
return buffers;
}
absl::Status RedzoneBuffers::CreateInputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
int64_t& rng_state) {
for (const auto* operand : instruction.operands()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, operand->shape(),
config, rng_state));
input_buffers_.push_back(buf);
input_shapes_.push_back(operand->shape());
}
return absl::OkStatus();
}
absl::Status RedzoneBuffers::CreateOutputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
BuffersToCreate buffers_to_create,
int64_t& rng_state) {
if (!instruction.shape().IsTuple()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, instruction.shape(),
config, rng_state));
output_buffers_.push_back(buf);
output_shape_ = instruction.shape();
return absl::OkStatus();
}
auto current_shape_it = instruction.shape().tuple_shapes().begin();
auto end = instruction.shape().tuple_shapes().end();
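  // Unless all outputs were requested, drop the last tuple element, which is
  // treated as a scratch buffer.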
end -= buffers_to_create == kAllInputsAllOutputs ? 0 : 1;
  output_shape_ = std::distance(current_shape_it, end) == 1
                      ? *current_shape_it
                      : ShapeUtil::MakeTupleShape(
                            std::vector<Shape>{current_shape_it, end});
for (; current_shape_it < end; current_shape_it++) {
if (current_shape_it->IsTuple()) {
return Unimplemented("Nested tuples are unsupported by RedzoneBuffers.");
}
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, *current_shape_it,
config, rng_state));
output_buffers_.push_back(buf);
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using AutotunerCompileUtilTest = HloTestBase;
TEST_F(AutotunerCompileUtilTest, VerifyOutputNotATuple) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = f32[1,2,3] custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_EQ(rzb3.output_shape(), root.shape());
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleOneElement) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_FALSE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape().tuple_shapes(0));
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 0);
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleTwoElements) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3], u8[1,2]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 2);
EXPECT_TRUE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_FALSE(rzb3.output_shape().IsTuple());
EXPECT_EQ(rzb3.output_shape(), root.shape().tuple_shapes(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_compile_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d2b37d13-4f6d-45ff-bfc3-4017594a9cec | cpp | tensorflow/tensorflow | gemm_fusion_autotuner | third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc | third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner_test.cc | #include "xla/service/gpu/autotuning/gemm_fusion_autotuner.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cublas_v2.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/float_normalization.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/split_k_gemm_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/cudnn_fusion_compiler.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/transforms/fusion_wrapper.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/transforms/priority_fusion.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tsl/lib/core/bits.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
using BackendConfig = GemmFusionAutotunerImpl::BackendConfig;
using BackendConfigs = GemmFusionAutotunerImpl::BackendConfigs;
using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput;
namespace {
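// Tiling search space and limits used when generating Triton GEMM configs.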
constexpr int kMinTileSize = 16;
constexpr TritonGemmConfig kDefaultGemmTiling = {32, 32, 32, 1, 1, 4};
constexpr int kMaxWavesForSplitK = 5;
constexpr std::array<int, 6> kBlockSizes = {16, 32, 64, 128, 256, 512};
constexpr std::array<int, 4> kNumStages = {1, 2, 3, 4};
constexpr std::array<int, 4> kNumWarps = {2, 4, 8, 16};
constexpr std::array<int, 5> kSplitK = {1, 2, 4, 8, 16};
constexpr std::array<int, 5> kNumCtas = {1, 2, 4, 8, 16};
using AutoTuneCacheKeyCount = absl::flat_hash_map<AutotuneCacheKey, uint64_t>;
class GemmConfigSetCollector : public ConstDfsHloVisitorWithDefault {
public:
explicit GemmConfigSetCollector(GemmFusionAutotunerImpl* impl)
: impl_(impl) {}
absl::StatusOr<BackendConfigs> CollectGemmConfigSets(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads = {}) {
error_out_on_cache_miss_ =
module->config()
.debug_options()
.xla_gpu_require_complete_aot_autotune_results();
gemm_config_sets_.clear();
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(this));
}
return std::move(gemm_config_sets_);
}
AutoTuneCacheKeyCount GetFusionsCount() {
return std::move(fusion_count_map_);
}
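  // Queues config candidates for fusions whose backend config is still missing
  // its Triton/cuDNN/custom config and that are not already in the autotune
  // cache.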
absl::Status HandleFusion(const HloInstruction* hlo) override {
const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo);
TF_ASSIGN_OR_RETURN(auto gpu_config,
hlo->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
AutotuneCacheKey key = AutotunerUtil::GetKey(hlo, impl_->GetConfig());
auto [iterator, inserted] = fusion_count_map_.insert({key, 1});
if (!inserted) {
++(iterator->second);
}
TF_ASSIGN_OR_RETURN(bool is_in_cache,
AutotunerUtil::IsInCache(key, impl_->GetConfig()));
if (is_in_cache || handled_fusions_.contains(key)) {
return absl::OkStatus();
}
bool missing_config = (backend_config.kind() == kTritonGemmFusionKind &&
!backend_config.has_triton_gemm_config()) ||
(backend_config.kind() == kCuDnnFusionKind &&
!backend_config.has_cudnn_fusion_config()) ||
(backend_config.kind() == kCustomFusionKind &&
!backend_config.has_custom_fusion_config());
if (missing_config) {
if (error_out_on_cache_miss_) {
return absl::NotFoundError(absl::StrCat(
"Complete autotuning results are required, but no cache result "
"found for key: ",
key.ToString()));
}
TF_ASSIGN_OR_RETURN(std::vector<BackendConfig> configs,
impl_->GenerateConfigs(*fusion));
gemm_config_sets_.push_back({fusion, std::move(configs)});
}
handled_fusions_.insert(key);
return absl::OkStatus();
}
absl::Status DefaultAction(const HloInstruction* hlo) override {
return absl::OkStatus();
}
private:
bool error_out_on_cache_miss_;
GemmFusionAutotunerImpl* impl_;
BackendConfigs gemm_config_sets_;
AutoTuneCacheKeyCount fusion_count_map_;
absl::flat_hash_set<AutotuneCacheKey> handled_fusions_;
};
struct TileSizeLimit {
int block_m = 0;
int block_n = 0;
int block_k = 0;
};
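// Tile size upper bounds: the next power of two of each relevant dot
// dimension, clamped below by kMinTileSize.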
absl::StatusOr<TileSizeLimit> GetLimits(const HloDotInstruction& dot) {
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_lhs,
NonContractingDimensionIndex(dot, 0));
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_rhs,
NonContractingDimensionIndex(dot, 1));
TF_ASSIGN_OR_RETURN(int64_t contracting_index,
ContractingDimensionIndex(dot, 1));
const int max_m = tsl::NextPowerOfTwoS64(
dot.operand(0)->shape().dimensions(non_contracting_index_lhs));
const int max_n = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(non_contracting_index_rhs));
const int max_k = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(contracting_index));
return TileSizeLimit{
std::max(max_m, kMinTileSize),
std::max(max_n, kMinTileSize),
std::max(max_k, kMinTileSize),
};
}
int GetLogEveryN() { return VLOG_IS_ON(3) ? 100 : 1000; }
int64_t PriorityFusionShapeSize(const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
HloCostAnalysis::Options PriorityFusionOptions() {
return {PriorityFusionShapeSize,
{},
{},
true};
}
absl::StatusOr<std::unique_ptr<HloModule>> TritonGemmAutotuneExtractor(
const TritonGemmConfig& config,
const se::DeviceDescription& gpu_device_info,
const HloFusionInstruction* fusion, DebugOptions debug_opts,
bool allow_filtering_kernels_spilling_registers) {
std::unique_ptr<HloModule> new_module =
ExtractInstructionIntoNewModule(*fusion);
if (!allow_filtering_kernels_spilling_registers) {
debug_opts.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
}
new_module->mutable_config().set_debug_options(debug_opts);
HloComputation* entry_computation = new_module->entry_computation();
HloInstruction* cloned_dot_fusion = entry_computation->root_instruction();
TF_ASSIGN_OR_RETURN(auto gpu_config,
cloned_dot_fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
*backend_config.mutable_triton_gemm_config() = config.ToProto();
TF_RETURN_IF_ERROR(cloned_dot_fusion->set_backend_config(gpu_config));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(cloned_dot_fusion, config));
for (PrimitiveType type :
{BF16, F8E5M2, F8E4M3FN, F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
GpuFloatSupport float_support(gpu_device_info.cuda_compute_capability(),
type);
FloatNormalization float_normalization(&float_support);
TF_RETURN_IF_ERROR(float_normalization.Run(new_module.get()).status());
}
PriorityFusion priority_fusion(
nullptr, gpu_device_info, PriorityFusionOptions());
TF_RETURN_IF_ERROR(priority_fusion.Run(new_module.get()).status());
FusionWrapper fusion_wrapper;
TF_RETURN_IF_ERROR(fusion_wrapper.Run(new_module.get()).status());
}
return new_module;
}
absl::StatusOr<std::unique_ptr<HloModule>> CublasGemmAutotuneExtractor(
const AutotuneConfig& config, const se::DeviceDescription& gpu_device_info,
const se::SemanticVersion& toolkit_version,
const HloFusionInstruction* fusion, const DebugOptions& debug_opts) {
const HloComputation* fusion_computation =
fusion->called_computations().at(0);
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
new_module->mutable_config().set_debug_options(debug_opts);
auto* dot = hlo_query::GetFirstInstructionWithOpcode(
*new_module->entry_computation(), HloOpcode::kDot);
if (dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 ||
dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 ||
dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3) {
dot->mutable_precision_config()->set_algorithm(
PrecisionConfig::ALG_DOT_F32_F32_F32);
}
for (GemmRewriterOptions::DType dtype :
{GemmRewriterOptions::DType::kFp8Only,
GemmRewriterOptions::DType::kNonFp8Only}) {
GemmRewriter rewriter(config.GetGpuComputeCapability(), toolkit_version,
GemmRewriterOptions{dtype});
PriorityFusion fusion_pass(
nullptr, gpu_device_info, PriorityFusionOptions());
TF_RETURN_IF_ERROR(rewriter.Run(new_module.get()).status());
TF_RETURN_IF_ERROR(fusion_pass.Run(new_module.get()).status());
}
return new_module;
}
absl::Status UpdateFusionInstructionKernelIndex(
HloInstruction* fusion_instruction, int kernel_index) {
GpuBackendConfig gpu_config =
fusion_instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(gpu_config));
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<HloModule>> CustomFusionKernelAutotuneExtractor(
const GemmFusionAutotunerImpl::CustomKernelFusionConfig& cutlass_config,
const AutotuneConfig& config, const se::SemanticVersion& toolkit_version,
const HloFusionInstruction* fusion, const DebugOptions& debug_opts) {
const HloComputation* fusion_computation = fusion->called_computation();
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
new_module->mutable_config().set_debug_options(debug_opts);
CustomKernelFusionRewriter rewriter(
&config.GetExecutor()->GetDeviceDescription());
PriorityFusion fusion_pass(
nullptr, config.GetExecutor()->GetDeviceDescription(),
PriorityFusionOptions());
TF_RETURN_IF_ERROR(rewriter.Run(new_module.get()).status());
TF_RETURN_IF_ERROR(fusion_pass.Run(new_module.get()).status());
HloInstruction* custom_kernel_fusion =
hlo_query::GetFirstInstructionWithOpcode(*new_module->entry_computation(),
HloOpcode::kFusion);
int64_t kernel_index = cutlass_config.kernel_index;
TF_RETURN_IF_ERROR(
UpdateFusionInstructionKernelIndex(custom_kernel_fusion, kernel_index));
return new_module;
}
absl::StatusOr<std::unique_ptr<HloModule>> FusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts) {
std::unique_ptr<HloModule> module = ExtractInstructionIntoNewModule(fusion);
module->mutable_config().set_debug_options(debug_opts);
return module;
}
absl::StatusOr<std::unique_ptr<HloModule>> CuDnnFusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts,
const int plan_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
FusionExtractor(fusion, debug_opts));
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kCuDnnFusionKind));
backend_config.mutable_cudnn_fusion_config()->set_plan_id(plan_id);
TF_RETURN_IF_ERROR(
module->entry_computation()->root_instruction()->set_backend_config(
gpu_config));
return module;
}
bool IsFusionKind(const HloInstruction& hlo, absl::string_view kind) {
auto gpu_config = hlo.backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
return false;
}
return gpu_config->fusion_backend_config().kind() == kind;
}
int GetCuDnnPlanCount(const HloInstruction& hlo,
const AutotuneConfig& autotune_config) {
if (auto gpu_config = hlo.backend_config<GpuBackendConfig>();
!gpu_config.ok() ||
gpu_config->fusion_backend_config().has_cudnn_fusion_config()) {
return {};
}
return CuDnnFusionCompiler::GetAvailablePlanCount(
*autotune_config.GetExecutor(), *DynCast<HloFusionInstruction>(&hlo));
}
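// Converts a backend config variant into the corresponding AutotuneResult
// proto entry.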
AutotuneResult FromConfig(const BackendConfig& config) {
AutotuneResult res;
if (std::holds_alternative<GemmFusionAutotunerImpl::CuBlasConfig>(config)) {
res.mutable_gemm()->set_algorithm(CUBLAS_GEMM_DEFAULT);
} else if (std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config)) {
res.mutable_custom_kernel_fusion()->set_kernel_index(
std::get<GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config)
.kernel_index);
} else if (std::holds_alternative<GemmFusionAutotunerImpl::CuDnnConfig>(
config)) {
res.mutable_algorithm()->set_algo_id(
std::get<GemmFusionAutotunerImpl::CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<TritonGemmConfig>(config)) {
*res.mutable_triton() = std::get<TritonGemmConfig>(config).ToProto();
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
return res;
}
absl::Status DumpOriginalFusion(AutotunerCompileUtil& util,
const HloFusionInstruction& fusion,
int fusion_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
return FusionExtractor(fusion, debug_opts);
}));
module->set_name(std::string(fusion.name()));
std::string rendered_graph_name =
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(), ".dot");
std::string rendered_graph = RenderGraph(rendered_graph_name, *module,
RenderedGraphFormat::kDot, true);
DumpToFileInDir(
*fusion.GetModule(),
"",
rendered_graph_name,
rendered_graph);
DumpToFileInDirOrStdout(
*fusion.GetModule(),
"",
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(), ".txt"),
module->ToString());
return absl::OkStatus();
}
absl::Status DumpAutotunedFusion(const AutotuneConfig& autotune_config,
const se::SemanticVersion& toolkit_version,
AutotunerCompileUtil& util,
const AutotuneResult result,
const HloFusionInstruction* fusion,
int fusion_id) {
TritonGemmConfig triton_gemm_config;
if (result.has_triton()) {
TF_ASSIGN_OR_RETURN(triton_gemm_config,
TritonGemmConfig::FromProto(result.triton()));
}
const se::DeviceDescription& device_desc =
autotune_config.GetExecutor()->GetDeviceDescription();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
if (result.has_algorithm()) {
return CuDnnFusionExtractor(*fusion, debug_opts,
result.algorithm().algo_id());
} else if (result.has_triton()) {
return TritonGemmAutotuneExtractor(
triton_gemm_config, device_desc, fusion, debug_opts,
true);
} else if (result.has_gemm()) {
return CublasGemmAutotuneExtractor(autotune_config, device_desc,
toolkit_version, fusion,
debug_opts);
} else {
LOG(FATAL) << "Unknown result type: " << result.DebugString();
}
}));
module->set_name(std::string(fusion->name()));
DumpToFileInDirOrStdout(
*fusion->GetModule(),
"",
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(),
".optimized.txt"),
module->ToString());
return absl::OkStatus();
}
std::string Serialize(const BackendConfig& config) {
if (auto triton_config = std::get_if<TritonGemmConfig>(&config)) {
tsl::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
std::string result;
printer.PrintToString(triton_config->ToProto(), &result);
return result;
}
return GemmFusionAutotunerImpl::ToString(config);
}
}
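// Replaces the GEMM fusion with a plain call to its fused computation; used
// when the cuBLAS reference configuration wins autotuning.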
absl::Status RewriteGemmFusionToCall(HloInstruction* fusion_instr) {
HloComputation* const computation = fusion_instr->parent();
HloInstruction* const call =
computation->AddInstruction(HloInstruction::CreateCall(
fusion_instr->shape(), fusion_instr->operands(),
fusion_instr->fused_instructions_computation()));
return computation->ReplaceInstruction(fusion_instr, call);
}
absl::Status RewriteGemmFusionToCustomKernelFusion(
HloInstruction* fusion_instr, se::DeviceDescription device_description,
int64_t kernel_index) {
HloComputation* const computation = fusion_instr->parent();
HloInstruction* const call =
computation->AddInstruction(HloInstruction::CreateCall(
fusion_instr->shape(), fusion_instr->operands(),
fusion_instr->fused_instructions_computation()));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(fusion_instr, call));
HloPassPipeline pipeline("autotuner_custom_kernel_fusion_rewriter");
pipeline.AddPass<CallInliner>();
pipeline.AddPass<CustomKernelFusionRewriter>(&device_description,
kernel_index);
HloModule* hlo_module = call->GetModule();
return pipeline.Run(hlo_module).status();
}
absl::Status HandleTritonGemm(HloInstruction* fusion_instr,
FusionBackendConfig& fusion_backend_config) {
TF_ASSIGN_OR_RETURN(
const TritonGemmConfig config,
TritonGemmConfig::FromProto(fusion_backend_config.triton_gemm_config()));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(fusion_instr, config));
}
return absl::OkStatus();
}
absl::Status GemmFusionAutotunerRewriterVisitor::HandleFusion(
HloInstruction* fusion_instr) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion_instr->backend_config<GpuBackendConfig>());
FusionBackendConfig& fusion_backend_config =
*gpu_config.mutable_fusion_backend_config();
if (fusion_backend_config.kind() != kTritonGemmFusionKind &&
fusion_backend_config.kind() != kCuDnnFusionKind &&
fusion_backend_config.kind() != kCustomFusionKind) {
return absl::OkStatus();
}
if (fusion_backend_config.has_triton_gemm_config()) {
TF_RETURN_IF_ERROR(HandleTritonGemm(fusion_instr, fusion_backend_config));
MarkAsChanged();
return absl::OkStatus();
}
if (fusion_backend_config.has_cudnn_fusion_config() ||
fusion_backend_config.has_custom_fusion_config()) {
return absl::OkStatus();
}
VLOG(4) << "Autotuning fusion instruction: " << fusion_instr->ToString();
TF_ASSIGN_OR_RETURN(
AutotuneResult autotune_result,
AutotunerUtil::Autotune(
fusion_instr, config_, [&]() -> absl::StatusOr<AutotuneResult> {
if (config_.IsDeviceless()) {
return absl::InternalError(absl::StrCat(
"Expect autotune result cache hit for deviceless "
"compilation (HLO: ",
fusion_instr->ToString(), ")"));
}
return absl::InternalError("Expect autotune result cache hit.");
}));
VLOG(4) << "Autotuning result: " << autotune_result.ShortDebugString();
if (autotune_result.has_triton()) {
*fusion_backend_config.mutable_triton_gemm_config() =
autotune_result.triton();
TF_RETURN_IF_ERROR(fusion_instr->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(HandleTritonGemm(fusion_instr, fusion_backend_config));
MarkAsChanged();
return absl::OkStatus();
}
if (autotune_result.has_gemm()) {
TF_RETURN_IF_ERROR(RewriteGemmFusionToCall(fusion_instr));
MarkAsChanged();
return absl::OkStatus();
}
if (autotune_result.has_custom_kernel_fusion()) {
TF_RETURN_IF_ERROR(RewriteGemmFusionToCustomKernelFusion(
fusion_instr, config_.GetExecutor()->GetDeviceDescription(),
autotune_result.custom_kernel_fusion().kernel_index()));
MarkAsChanged();
return absl::OkStatus();
}
CHECK(autotune_result.has_algorithm());
fusion_backend_config.set_kind(std::string(kCuDnnFusionKind));
fusion_backend_config.mutable_cudnn_fusion_config()->set_plan_id(
autotune_result.algorithm().algo_id());
TF_RETURN_IF_ERROR(fusion_instr->set_backend_config(gpu_config));
MarkAsChanged();
return absl::OkStatus();
}
bool GemmFusionAutotunerImpl::CuBlasConfig::operator<(
const CuBlasConfig& other) const {
return false;
}
bool GemmFusionAutotunerImpl::CuDnnConfig::operator<(
const CuDnnConfig& other) const {
return plan_id < other.plan_id;
}
bool GemmFusionAutotunerImpl::CustomKernelFusionConfig::operator<(
const CustomKernelFusionConfig& other) const {
return false;
}
bool GemmFusionAutotunerImpl::IsAutotuningEnabled() const {
return debug_options_.xla_gpu_autotune_level() > 0 &&
!debug_options_.xla_gpu_deterministic_ops();
}
std::string GemmFusionAutotunerImpl::ToString(
const BackendConfig& config) {
if (std::holds_alternative<TritonGemmConfig>(config)) {
return std::get<TritonGemmConfig>(config).ToString();
} else if (std::holds_alternative<CuDnnConfig>(config)) {
return absl::StrFormat("cuDNN plan %d",
std::get<CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<CuBlasConfig>(config)) {
return "reference (cublas)";
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
}
std::vector<BackendConfig> GenerateCustomKernelFusionConfigs(
const HloFusionInstruction& fusion,
se::DeviceDescription device_description) {
std::vector<BackendConfig> configs;
const CustomKernelFusionPatternRegistry* patterns =
CustomKernelFusionPatternRegistry::Default();
HloComputation* computation = fusion.called_computation();
HloInstruction* dot_instruction =
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot);
std::vector<CustomKernelFusionPattern::Match> match =
patterns->Match(device_description, dot_instruction);
if (match.size() == 1) {
CustomKernelFusionRegistry* registry =
CustomKernelFusionRegistry::Default();
auto* custom_kernel_fusion = registry->Lookup(match[0].config().name());
if (custom_kernel_fusion != nullptr) {
const HloComputation* fusion_computation = fusion.called_computation();
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
CustomKernelFusionRewriter rewriter(&device_description);
absl::StatusOr<bool> changed = rewriter.Run(new_module.get());
if (!changed.ok() || !changed.value()) {
VLOG(2) << "Skip custom kernel config. Failed to rewrite custom kernel "
"fusion: "
<< changed.status();
return configs;
}
HloInstruction* custom_kernel_fusion_instr =
hlo_query::GetFirstInstructionWithOpcode(
*new_module->entry_computation(), HloOpcode::kFusion);
if (custom_kernel_fusion_instr == nullptr) {
VLOG(2) << "Skip custom kernel config. Failed to find custom kernel "
"fusion instruction in the rewritten module.";
return configs;
}
absl::StatusOr<std::vector<CustomKernel>> kernels =
custom_kernel_fusion->LoadKernels(
device_description,
custom_kernel_fusion_instr->fused_instructions_computation());
if (!kernels.ok()) {
VLOG(2) << "Skip custom kernel config. Failed to load custom kernels: "
<< kernels.status();
} else {
for (int i = 0; i < kernels.value().size(); ++i) {
GemmFusionAutotunerImpl::CustomKernelFusionConfig config{
i};
configs.push_back(config);
}
}
}
}
return configs;
}
absl::StatusOr<std::vector<BackendConfig>>
GemmFusionAutotunerImpl::GenerateConfigs(const HloFusionInstruction& fusion) {
const HloDotInstruction* dot =
Cast<HloDotInstruction>(hlo_query::GetFirstInstructionWithOpcode(
*fusion.called_computations().at(0), HloOpcode::kDot));
std::vector<BackendConfig> configs;
if (!debug_options_.xla_gpu_experimental_disable_binary_libraries()) {
if (algorithm_util::IsSupportedByCublasOrCublasLt(
dot->precision_config().algorithm(), GetComputeCapability()) &&
!dot->sparse_operands() && IsAutotuningEnabled()) {
configs.push_back(CuBlasConfig{});
}
bool is_hopper =
!config_.IsDeviceless() && GetComputeCapability().IsAtLeastHopper();
bool is_cudnn_enabled =
debug_options_.xla_gpu_cudnn_gemm_fusion_level() > 0 && is_hopper &&
GetDnnVersionInfoOrDefault(config_.GetExecutor()).major_version() >= 9;
if ((IsFusionKind(fusion, kCuDnnFusionKind) && IsAutotuningEnabled()) ||
(IsFusionKind(fusion, kTritonGemmFusionKind) && is_cudnn_enabled &&
algorithm_util::IsSupportedByCudnn(
dot->precision_config().algorithm()) &&
!dot->sparse_operands() && IsAutotuningEnabled())) {
const int plan_count = GetCuDnnPlanCount(fusion, config_);
for (int plan_id = 0; plan_id < plan_count; ++plan_id) {
configs.push_back(CuDnnConfig{plan_id});
}
}
if (IsFusionKind(fusion, kCuDnnFusionKind)) {
if (!IsAutotuningEnabled()) {
configs.push_back(CuDnnConfig{-1});
}
return configs;
}
}
if ((IsFusionKind(fusion, kCustomFusionKind) ||
IsFusionKind(fusion, kTritonGemmFusionKind)) &&
IsAutotuningEnabled() && !config_.IsDeviceless()) {
std::vector<BackendConfig> custom_kernel_fusion_configs =
GenerateCustomKernelFusionConfigs(
fusion, config_.GetExecutor()->GetDeviceDescription());
configs.insert(configs.end(), custom_kernel_fusion_configs.begin(),
custom_kernel_fusion_configs.end());
}
TF_ASSIGN_OR_RETURN(std::vector<TritonGemmConfig> triton_configs,
GenerateTritonConfigs(*dot));
for (TritonGemmConfig& config : triton_configs) {
configs.push_back(std::move(config));
}
return configs;
}
absl::StatusOr<std::vector<TritonGemmConfig>>
GemmFusionAutotunerImpl::GenerateTritonConfigs(const HloDotInstruction& dot) {
std::vector<const HloInstruction*> converts =
HloBfsFindAll({&dot}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kConvert;
});
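  // The narrowest element type flowing through the dot (including through
  // converts) bounds how small block_k may be below.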
int minBitWidth = primitive_util::BitWidth(dot.shape().element_type());
for (auto convert : converts) {
auto in_type = convert->operand(0)->shape().element_type();
auto out_type = convert->shape().element_type();
minBitWidth = std::min({minBitWidth, primitive_util::BitWidth(in_type),
primitive_util::BitWidth(out_type)});
}
std::vector<TritonGemmConfig> result_configs;
TF_ASSIGN_OR_RETURN(TileSizeLimit limits, GetLimits(dot));
if (triton_configs_.empty()) {
triton_configs_ = !IsAutotuningEnabled()
? std::vector(1, kDefaultGemmTiling)
: debug_options_.xla_gpu_exhaustive_tiling_search()
? GetExhaustiveTritonConfigs()
: GetDefaultTritonConfigs();
}
constexpr int kMinGemmElements = 32 * 32;
bool small_dot =
ShapeUtil::ElementsIn(dot.operand(0)->shape()) <= kMinGemmElements &&
ShapeUtil::ElementsIn(dot.operand(1)->shape()) <= kMinGemmElements;
std::vector<TritonGemmConfig> triton_configs =
small_dot ? std::vector(1, kDefaultGemmTiling) : triton_configs_;
const int kCoreCount =
!config_.IsDeviceless()
? config_.GetExecutor()->GetDeviceDescription().core_count()
: 100;
const int64_t kSufficientNumberOfTiles = kMaxWavesForSplitK * kCoreCount;
const int64_t result_size = ShapeUtil::ElementsIn(dot.shape());
absl::flat_hash_set<TritonGemmConfig> added;
bool is_hopper =
!config_.IsDeviceless() && GetComputeCapability().IsAtLeastHopper();
for (TritonGemmConfig& config : triton_configs) {
config.block_m = std::min(config.block_m, limits.block_m);
config.block_n = std::min(config.block_n, limits.block_n);
config.block_k = std::min(config.block_k, limits.block_k);
int max_split_k = 1;
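    // Cap split_k so the total number of result tiles stays around
    // kSufficientNumberOfTiles for the device's core count.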
if (debug_options_.xla_gpu_enable_split_k_autotuning()) {
int64_t ratio = kSufficientNumberOfTiles * config.block_m *
config.block_n / result_size;
max_split_k = 1 << std::max<int>(tsl::Log2Floor64(ratio), 0);
}
config.split_k = std::min(config.split_k, max_split_k);
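    // Ensure block_k spans at least kLdmatrixGranularity bits of the narrowest
    // element type, i.e. 256 / minBitWidth elements.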
constexpr int kLdmatrixGranularity = 256;
config.block_k =
std::max(config.block_k, kLdmatrixGranularity / minBitWidth);
if (dot.sparse_operands()) {
if (is_hopper) {
config.block_m = std::max(config.block_m, 64);
config.num_warps = std::max(config.num_warps, 4);
}
config.block_k = std::max(
config.block_k,
2 * std::max(kMinTileSize, kLdmatrixGranularity / minBitWidth));
int meta_elements = config.block_m * config.block_k / 16;
config.num_warps =
std::min<int>(config.num_warps, meta_elements / WarpSize());
}
if (added.insert(config).second) {
result_configs.push_back(config);
}
}
return result_configs;
}
absl::StatusOr<absl::flat_hash_map<
const HloFusionInstruction*,
std::vector<GemmFusionAutotunerImpl::ExecutableCandidate>>>
GemmFusionAutotunerImpl::CompileAll(AutotunerCompileUtil& compile_util,
const BackendConfigs& task) {
tsl::profiler::ScopedAnnotation annotation("XlaAutotunerCompilation");
absl::Mutex results_mu;
absl::flat_hash_map<const HloFusionInstruction*,
std::vector<ExecutableCandidate>>
results;
if (task.empty()) {
return results;
}
const int log_every_n = GetLogEveryN();
int64_t config_count = 0;
for (const auto& [unused, configs] : task) {
config_count += configs.size();
}
std::atomic<int> done_count = 0;
std::atomic<int> good_count = 0;
auto log = [&](bool success) {
const int done_so_far = done_count.fetch_add(1) + 1;
const int good_so_far =
success ? good_count.fetch_add(1) + 1 : good_count.load();
if (done_so_far % log_every_n == 0) {
VLOG(2) << "Compiled " << done_so_far << " of " << config_count
<< " configs (successful: " << good_so_far << ")";
}
};
auto compile = [&](const HloFusionInstruction* fusion,
const BackendConfig& config,
bool allow_filtering_kernels_spilling_registers)
-> absl::StatusOr<bool> {
std::unique_ptr<Executable> executable;
if (std::holds_alternative<TritonGemmConfig>(config)) {
TF_ASSIGN_OR_RETURN(
executable, compile_util.Compile([&](const DebugOptions& opts) {
return TritonGemmAutotuneExtractor(
std::get<TritonGemmConfig>(config),
config_.GetExecutor()->GetDeviceDescription(), fusion, opts,
allow_filtering_kernels_spilling_registers);
}));
} else if (std::holds_alternative<CuDnnConfig>(config)) {
executable =
compile_util
.Compile([&](const DebugOptions& opts) {
return CuDnnFusionExtractor(
*fusion, opts, std::get<CuDnnConfig>(config).plan_id);
})
.value_or(nullptr);
} else if (std::holds_alternative<CuBlasConfig>(config)) {
TF_ASSIGN_OR_RETURN(
executable, compile_util.Compile([&](const DebugOptions& opts) {
return CublasGemmAutotuneExtractor(
config_, config_.GetExecutor()->GetDeviceDescription(),
toolkit_version_, fusion, opts);
}));
} else if (std::holds_alternative<CustomKernelFusionConfig>(config)) {
TF_ASSIGN_OR_RETURN(executable,
compile_util.Compile([&](const DebugOptions& opts) {
return CustomFusionKernelAutotuneExtractor(
std::get<CustomKernelFusionConfig>(config),
config_, toolkit_version_, fusion, opts);
}));
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
if (executable != nullptr) {
absl::MutexLock lock(&results_mu);
results[fusion].push_back({config, std::move(executable)});
return true;
}
return false;
};
if (thread_pool_ && thread_pool_->NumThreads() > 1 &&
debug_options_.xla_gpu_force_compilation_parallelism() != 1) {
if (task.size() == 1) {
absl::string_view fusion_name = task.begin()->first->name();
VLOG(1) << "Compiling " << config_count << " configs for " << fusion_name
<< " on " << thread_pool_->NumThreads() << " threads.";
} else {
VLOG(1) << "Compiling " << config_count << " configs for " << task.size()
<< " fusions on " << thread_pool_->NumThreads() << " threads.";
}
tsl::BlockingCounter counter(config_count);
for (const auto& key_value : task) {
const HloFusionInstruction* fusion = key_value.first;
const std::vector<BackendConfig>& gemm_config_set = key_value.second;
VLOG(10) << "Compiling fusion: " << fusion->name();
VLOG(10) << "Dumping fusion computation: "
<< fusion->called_computation()->ToString();
for (const BackendConfig& config : gemm_config_set) {
thread_pool_->Schedule([&, fusion] {
VLOG(10) << "Trying configuration forceable through: "
"--xla_gpu_override_gemm_autotuner='"
<< Serialize(config) << "'";
VLOG(10) << "WARNING: you are running in multithreaded-mode, the "
"last configuration printed out might not be the one "
"causing issues! Use "
"--xla_gpu_force_compilation_parallelism=1 to fix.";
absl::StatusOr<bool> has_executable =
compile(fusion, config, gemm_config_set.size() > 1);
TF_CHECK_OK(has_executable.status())
<< "Failure occured when compiling fusion " << fusion->name()
<< " with config '" << ToString(config)
<< "'\nFused HLO computation:\n"
<< fusion->fused_instructions_computation()->ToString();
log(has_executable.value());
counter.DecrementCount();
});
}
}
counter.Wait();
} else {
if (task.size() == 1) {
absl::string_view fusion_name = task.begin()->first->name();
LOG(WARNING) << "Compiling " << config_count << " configs for "
<< fusion_name << " on a single thread.";
} else {
LOG(WARNING) << "Compiling " << config_count << " configs for "
<< task.size() << " fusions on a single thread.";
}
for (const auto& [fusion, gemm_config_set] : task) {
VLOG(10) << "Compiling fusion: " << fusion->name();
VLOG(10) << "Dumping fusion computation: "
<< fusion->called_computation()->ToString();
for (const BackendConfig& config : gemm_config_set) {
VLOG(10) << "Trying configuration forceable through: "
"--xla_gpu_override_gemm_autotuner='"
<< Serialize(config) << "'";
TF_ASSIGN_OR_RETURN(
bool has_executable,
compile(fusion, config, gemm_config_set.size() > 1));
log(has_executable);
}
}
}
VLOG(1) << "Done compiling (successful: " << good_count.load() << ").";
return results;
}
absl::StatusOr<std::vector<AutotuneResult>> GemmFusionAutotunerImpl::Profile(
AutotunerCompileUtil& compile_util, const HloFusionInstruction& fusion,
absl::Span<const ExecutableCandidate> candidates) {
const HloComputation* fusion_computation = fusion.called_computations().at(0);
se::StreamExecutor* stream_exec = config_.GetExecutor();
if (!stream_exec->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaAutotunerMeasurement:#hlo_op=%s#",
fusion.name());
});
se::DeviceMemoryAllocator* allocator = config_.GetAllocator();
std::unique_ptr<se::DeviceMemoryAllocator> owned_allocator;
if (allocator == nullptr) {
owned_allocator =
std::make_unique<se::StreamExecutorMemoryAllocator>(stream_exec);
allocator = owned_allocator.get();
}
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
const HloInstruction& root = *fusion_computation->root_instruction();
BufferComparator comparator(root.shape(),
debug_options_.xla_gpu_autotune_gemm_rtol());
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*fusion_computation->FusionInstruction(), config_,
debug_options_, RedzoneBuffers::kAllInputs));
const int log_every_n = GetLogEveryN();
std::vector<AutotuneResult> results;
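  // The cuBLAS candidate's output, when present, serves as the reference
  // against which the other candidates are checked for correctness.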
std::optional<ScopedShapedBuffer> reference_buffer;
for (const ExecutableCandidate& candidate : candidates) {
VLOG(5) << "Trying : " << ToString(candidate.config);
AutotuneResult res = FromConfig(candidate.config);
std::optional<ProfilingOutput> profiling_output;
if (IsAutotuningEnabled()) {
TF_ASSIGN_OR_RETURN(
profiling_output,
compile_util.ProfileExecutable(candidate.executable.get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
if (std::holds_alternative<CuBlasConfig>(candidate.config) &&
config_.should_check_correctness()) {
reference_buffer = std::move(profiling_output->output);
}
int ran_so_far = results.size() + 1;
if (ran_so_far % log_every_n == 0) {
VLOG(2) << "Ran " << ran_so_far << " configs of " << candidates.size()
<< ".";
}
if (!profiling_output) {
VLOG(5) << "Skipping this tiling.";
continue;
}
VLOG(5) << "Running the kernel took: " << profiling_output->duration;
if (profiling_output->duration >= absl::Seconds(1)) {
LOG(WARNING) << "Slow kernel for "
<< fusion.called_computations()[0]->ToString()
<< " took: " << profiling_output->duration << ". "
<< ToString(candidate.config);
}
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(profiling_output->duration);
}
if (reference_buffer.has_value() &&
!std::holds_alternative<CuBlasConfig>(candidate.config)) {
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator::RedzoneCheckStatus rz_check_status,
rz_buffers.RedzoneAllocator().CheckRedzones());
if (!rz_check_status.ok()) {
LOG(ERROR) << "Red zone modified";
res.mutable_failure()->set_kind(AutotuneResult::REDZONE_MODIFIED);
res.mutable_failure()->set_msg(rz_check_status.RedzoneFailureMsg());
CHECK(!config_.should_crash_on_check_failure());
continue;
}
TF_ASSIGN_OR_RETURN(
bool outputs_match,
comparator.CompareEqual(
stream, profiling_output->output.root_buffer(),
reference_buffer->root_buffer()));
if (!outputs_match) {
const char kMessage[] =
"Results do not match the reference. This is likely a "
"bug/unexpected loss of precision.";
LOG(ERROR) << kMessage;
CHECK(!config_.should_crash_on_check_failure());
res.mutable_failure()->set_kind(AutotuneResult::DISQUALIFIED);
res.mutable_failure()->set_msg(kMessage);
}
}
results.push_back(std::move(res));
}
VLOG(2) << "Done running.";
return results;
}
std::vector<TritonGemmConfig>
GemmFusionAutotunerImpl::GetExhaustiveTritonConfigs() const {
std::vector<TritonGemmConfig> configs;
se::CudaComputeCapability cc = GetComputeCapability();
bool should_tune_ctas =
debug_options_.xla_gpu_exhaustive_tiling_search() && cc.IsAtLeastHopper();
for (int num_stages : kNumStages) {
for (int tile_m : kBlockSizes) {
for (int tile_n : kBlockSizes) {
for (int tile_k : kBlockSizes) {
const int tile_lhs = tile_m * tile_k;
const int tile_rhs = tile_k * tile_n;
for (int num_warps : kNumWarps) {
if (num_warps * WarpSize() > std::min(tile_lhs, tile_rhs)) {
break;
}
for (int split_k : kSplitK) {
if (!debug_options_.xla_gpu_enable_split_k_autotuning() &&
split_k > 1) {
break;
}
if (should_tune_ctas) {
for (int num_ctas : kNumCtas) {
if (num_ctas <= num_warps) {
configs.push_back(TritonGemmConfig(tile_m, tile_n, tile_k,
split_k, num_stages,
num_warps, num_ctas));
}
}
} else {
configs.push_back(TritonGemmConfig(tile_m, tile_n, tile_k,
split_k, num_stages,
num_warps, 1));
}
}
}
}
}
}
}
return configs;
}
std::vector<TritonGemmConfig> GemmFusionAutotunerImpl::GetDefaultTritonConfigs()
const {
using Config = TritonGemmConfig;
std::vector<Config> configs = {
Config(32, 32, 256, 1, 1, 4), Config(64, 32, 32, 16, 1, 4),
Config(32, 64, 64, 4, 1, 4), Config(128, 128, 64, 4, 1, 4),
Config(16, 16, 256, 1, 1, 4), Config(16, 128, 32, 16, 1, 4),
Config(16, 64, 128, 1, 1, 4), Config(16, 128, 32, 8, 1, 4),
Config(16, 16, 512, 1, 1, 4), Config(32, 16, 512, 1, 1, 4),
Config(64, 32, 64, 1, 2, 8), Config(128, 256, 32, 1, 3, 8),
Config(256, 128, 32, 1, 3, 8), Config(256, 64, 32, 1, 4, 4),
Config(64, 256, 32, 1, 4, 4), Config(128, 64, 32, 1, 4, 4),
Config(64, 128, 32, 1, 4, 4), Config(256, 128, 128, 1, 3, 8),
Config(256, 64, 128, 1, 4, 4), Config(64, 256, 128, 1, 4, 4),
Config(128, 128, 128, 1, 4, 4), Config(128, 64, 64, 1, 4, 4),
Config(64, 128, 64, 1, 4, 4), Config(128, 32, 64, 1, 4, 4),
Config(64, 32, 64, 1, 4, 4), Config(32, 128, 32, 1, 4, 4),
Config(128, 128, 32, 1, 4, 4), Config(16, 16, 256, 1, 3, 4),
Config(128, 128, 64, 2, 1, 8), Config(64, 64, 64, 1, 2, 4),
Config(16, 64, 256, 8, 1, 4), Config(256, 256, 128, 1, 3, 8)};
if (GetComputeCapability().IsAtLeastHopper()) {
absl::c_copy(
std::vector<Config>{
Config(16, 32, 32, 8, 1, 2),
Config(16, 64, 128, 8, 1, 4),
Config(16, 64, 128, 16, 3, 4),
},
std::back_inserter(configs));
}
return configs;
}
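// Writes the collected autotuning logs as a textproto to the file named by
// --xla_gpu_dump_autotune_logs_to, if that flag is set.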
absl::Status DumpAutotuningLogs(const DebugOptions& debug_opts,
const AutotuningLogs& autotuning_logs) {
if (absl::string_view file_path = debug_opts.xla_gpu_dump_autotune_logs_to();
!file_path.empty()) {
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
std::string textproto;
tsl::protobuf::TextFormat::PrintToString(autotuning_logs, &textproto);
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(tsl::Env::Default(), resolved_path, textproto));
LOG(INFO) << "Autotune logs serialized to file: " << resolved_path;
}
return absl::OkStatus();
}
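// Compiles and profiles all candidate configs for every fusion, drops the
// cuBLAS reference result when the fallback is disabled, picks the best
// candidate, stores it in the autotune cache, and optionally dumps the
// original/autotuned fusions as well as per-fusion logs.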
absl::Status GemmFusionAutotunerImpl::Autotune(
AutotunerCompileUtil& compile_util, const BackendConfigs& gemm_config_sets,
AutoTuneCacheKeyCount fusion_count_map) {
TF_ASSIGN_OR_RETURN(auto executable_sets,
CompileAll(compile_util, gemm_config_sets));
for (auto& [unused, candidates] : executable_sets) {
absl::c_sort(candidates, [](const auto& a, const auto& b) {
return a.config < b.config;
});
}
AutotuningLogs autotuning_logs;
int fusion_id = 0;
for (const auto& [fusion, candidates] : executable_sets) {
TF_ASSIGN_OR_RETURN(std::vector<AutotuneResult> results,
Profile(compile_util, *fusion, candidates));
if (!debug_options_.xla_gpu_cublas_fallback() &&
results.front().has_gemm()) {
results.erase(results.begin());
}
const HloInstruction* root =
fusion->called_computations().at(0)->root_instruction();
TF_ASSIGN_OR_RETURN(
AutotuneResult best,
PickBestResult(results, root->ToString(), root->GetModule()->config()));
VLOG(2) << "Best time: "
<< tsl::proto_utils::FromDurationProto(best.run_time());
if (debug_options_.xla_gpu_dump_autotuned_gemm_fusions()) {
TF_RETURN_IF_ERROR(DumpOriginalFusion(compile_util, *fusion, fusion_id));
TF_RETURN_IF_ERROR(DumpAutotunedFusion(
config_, toolkit_version_, compile_util, best, fusion, fusion_id++));
}
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
TF_ASSIGN_OR_RETURN(
bool added, AutotunerUtil::AddResult(key, std::move(best), config_));
if (!added) {
LOG(WARNING) << "AutotunerUtil::AddResult already existed: "
<< key.ToString();
}
if (!debug_options_.xla_gpu_dump_autotune_logs_to().empty()) {
auto autotuning_log = autotuning_logs.add_logs();
autotuning_log->set_fusion_name(std::string(fusion->name()));
for (const auto& autotune_result : results) {
auto log_result = autotuning_log->add_results();
log_result->CopyFrom(autotune_result);
}
if (auto fusion_key_count = fusion_count_map.find(key);
fusion_key_count != fusion_count_map.end()) {
auto fusion_key = fusion_key_count->first;
auto fusion_count = fusion_key_count->second;
autotuning_log->set_fusion_count(fusion_count);
}
}
}
TF_RETURN_IF_ERROR(DumpAutotuningLogs(debug_options_, autotuning_logs));
return absl::OkStatus();
}
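// Returns the contiguous slice of `gemm_config_sets` that the given shard is
// responsible for, dividing the fusions into shard_count equally sized
// (rounded-up) buckets.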
static BackendConfigs TrimConfigs(const BackendConfigs& gemm_config_sets,
const int shard_index,
const int shard_count) {
const uint64_t bucket_size =
(gemm_config_sets.size() + shard_count - 1) / shard_count;
const uint64_t start = bucket_size * shard_index;
const uint64_t end = std::min(start + bucket_size, gemm_config_sets.size());
if (start >= end) {
return {};
}
return BackendConfigs(gemm_config_sets.cbegin() + start,
gemm_config_sets.cbegin() + end);
}
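// Publishes this shard's serialized autotune results under a per-module,
// per-shard key and then loads the results published by all other shards.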
absl::Status ExchangeResults(KeyValueStoreInterface& key_value_store,
const int module_id, const int shard_index,
const int shard_count) {
AutotuneResults results;
TF_RETURN_IF_ERROR(AutotunerUtil::SerializeAutotuneResults(&results));
TF_ASSIGN_OR_RETURN(std::string results_str,
AutotuneResultsToString(results, true));
constexpr absl::string_view kKeyPrefix = "gemm_fusion_autotuning_results";
TF_RETURN_IF_ERROR(key_value_store.Set(
absl::StrFormat("%s_%d_%d", kKeyPrefix, module_id, shard_index),
results_str));
VLOG(2) << "Rank " << shard_index << ": published results";
for (int i = 0; i < shard_count; ++i) {
if (i == shard_index) {
continue;
}
VLOG(2) << "Rank " << shard_index << ": waiting for results from rank " << i
<< " / " << shard_count;
TF_ASSIGN_OR_RETURN(
std::string autotune_results_str,
key_value_store.Get(
absl::StrFormat("%s_%d_%d", kKeyPrefix, module_id, i),
absl::Hours(24)));
TF_RETURN_IF_ERROR(
AutotunerUtil::LoadAutotuneResults(autotune_results_str, true));
}
return absl::OkStatus();
}
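// Pass entry point. Collects the GEMM fusion config sets and then either
// records trivial results (autotuning disabled or overridden via flag), or
// compiles and profiles the candidates on the device, optionally sharded
// across processes with result exchange, before rewriting the fusions with
// the chosen configs.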
absl::StatusOr<bool> GemmFusionAutotuner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER("GEMM fusion autotuner");
const DebugOptions& debug_options = module->config().debug_options();
GemmFusionAutotunerImpl autotuner(config_, toolkit_version_, debug_options,
thread_pool_);
GemmConfigSetCollector gemm_config_set_collector(&autotuner);
TF_ASSIGN_OR_RETURN(BackendConfigs gemm_config_sets,
gemm_config_set_collector.CollectGemmConfigSets(
module, execution_threads));
const int total_fusion_count = gemm_config_sets.size();
AutoTuneCacheKeyCount fusion_count_map =
gemm_config_set_collector.GetFusionsCount();
if (!autotuner.IsAutotuningEnabled()) {
for (const auto& [fusion, tilings] : gemm_config_sets) {
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
AutotuneResult res = FromConfig(tilings[0]);
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::ZeroDuration());
TF_RETURN_IF_ERROR(AutotunerUtil::AddResult(key, res, config_).status());
}
} else if (!debug_options.xla_gpu_override_gemm_autotuner().empty()) {
AutotuneResult::TritonGemmKey gemm_key;
CHECK(tsl::protobuf::TextFormat::ParseFromString(
debug_options.xla_gpu_override_gemm_autotuner(), &gemm_key));
VLOG(1) << "Overriding GEMM autotuner with the following config: "
<< gemm_key.DebugString();
for (const auto& [fusion, unused] : gemm_config_sets) {
const AutotuneCacheKey key = AutotunerUtil::GetKey(fusion, config_);
AutotuneResult res;
*res.mutable_triton() = gemm_key;
*res.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::ZeroDuration());
TF_RETURN_IF_ERROR(AutotunerUtil::AddResult(key, res, config_).status());
}
} else if (!config_.IsDeviceless()) {
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> opt_compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(opt_compile_util.has_value());
std::string correctness_check_str = config_.should_check_correctness()
? "(with correctness check)"
: "(without correctness check)";
const bool shard_autotuning = debug_options.xla_gpu_shard_autotuning() &&
key_value_store_.process_count > 1 &&
total_fusion_count > 0;
if (shard_autotuning) {
if (key_value_store_.key_value_store == nullptr) {
return absl::FailedPreconditionError(
"Sharded autotuning requested but key-value store is missing.");
}
gemm_config_sets =
TrimConfigs(gemm_config_sets, key_value_store_.process_index,
key_value_store_.process_count);
}
VLOG(1) << absl::StrFormat(
"Shard %d / %d: autotuning %d / %d fusions for %s %s.",
key_value_store_.process_index + 1, key_value_store_.process_count,
gemm_config_sets.size(), total_fusion_count, module->name(),
correctness_check_str);
TF_RETURN_IF_ERROR(autotuner.Autotune(*opt_compile_util, gemm_config_sets,
std::move(fusion_count_map)));
VLOG(1) << "Done autotuning.";
if (shard_autotuning) {
TF_RETURN_IF_ERROR(ExchangeResults(
*key_value_store_.key_value_store, module->unique_id(),
key_value_store_.process_index, key_value_store_.process_count));
}
}
return GemmFusionAutotunerRewriterVisitor(config_).RunOnModule(
module, execution_threads);
}
}
} | #include "xla/service/gpu/autotuning/gemm_fusion_autotuner.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/autotuning.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using HloExtractionTest = HloTestBase;
TEST_F(HloExtractionTest, InstructionExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module = ExtractInstructionIntoNewModule(
*module->entry_computation()->root_instruction()->operand(0));
module = nullptr;
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 3);
  TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
                               /*layout_sensitive=*/true,
                               /*allow_mixed_precision=*/false));
}
TEST_F(HloExtractionTest, ComputationExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module =
ExtractComputationIntoNewModule(*module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation());
module = nullptr;
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Convert(m::Parameter()), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 4);
  TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
                               /*layout_sensitive=*/true,
                               /*allow_mixed_precision=*/false));
}
class StatelessAutotunerTest : public HloTestBase {
public:
StatelessAutotunerTest()
      : HloTestBase(/*verifier_layout_sensitive=*/true,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
se::SemanticVersion GetToolkitVersion() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.runtime_version();
}
void SetUp() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::SetUp();
}
void TearDown() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::TearDown();
}
absl::StatusOr<std::vector<GemmFusionAutotunerImpl::BackendConfig>>
GetPossibleMatmulAutotuneConfigs(
const HloModule& module,
const se::CudaComputeCapability& compute_capability,
const se::SemanticVersion& toolkit_version,
const DebugOptions& debug_options) {
const HloFusionInstruction& fusion = *Cast<HloFusionInstruction>(
module.entry_computation()->root_instruction());
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DeviceConfig test_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{test_config, debug_options};
GemmFusionAutotunerImpl autotuner(autotune_config, toolkit_version,
debug_options, nullptr);
return autotuner.GenerateConfigs(fusion);
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
absl::StatusOr<std::vector<GemmFusionAutotunerImpl::BackendConfig>>
GetPossibleMatmulAutotuneConfigs(const HloModule& module) {
DeviceConfig device_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{device_config, GetDebugOptionsForTest()};
GemmFusionAutotunerImpl autotuner(autotune_config, GetToolkitVersion(),
GetDebugOptionsForTest(), nullptr);
const HloFusionInstruction& fusion = *Cast<HloFusionInstruction>(
module.entry_computation()->root_instruction());
return autotuner.GenerateConfigs(fusion);
}
bool hasCublasConfig(
const std::vector<GemmFusionAutotunerImpl::BackendConfig>& configs) {
return std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<GemmFusionAutotunerImpl::CuBlasConfig>(
config);
});
}
};
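// HLO template with a $0 placeholder for the dot algorithm, substituted by the
// cuBLAS-fallback tests below.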
constexpr absl::string_view kHloDotFusionWithAlgorithm = R"(
HloModule module
computation {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
algorithm=$0,
lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
ENTRY main {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT computation = f32[1024,1024] fusion(f32[1024,1024] p0,f32[1024,1024] p1),
kind=kCustom,
calls=computation
}
)";
TEST_F(StatelessAutotunerTest, NoCublasFallbackForTf32Tf32F32X3Algorithm) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(absl::Substitute(
kHloDotFusionWithAlgorithm, "dot_tf32_tf32_f32_x3")));
TF_ASSERT_OK_AND_ASSIGN(auto configs,
GetPossibleMatmulAutotuneConfigs(*module));
EXPECT_FALSE(hasCublasConfig(configs))
<< "There is no cublas implementation for dot_tf32_tf32_f32_x3. That is "
"why we don't want to fallback to cublas.";
}
TEST_F(StatelessAutotunerTest,
NoCublasFallbackForBf16Bf16F32AlgorithmOnHopper) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(absl::Substitute(
kHloDotFusionWithAlgorithm, "dot_bf16_bf16_f32")));
TF_ASSERT_OK_AND_ASSIGN(auto configs,
GetPossibleMatmulAutotuneConfigs(*module));
switch (GetCudaComputeCapability().major) {
case se::CudaComputeCapability::AMPERE:
EXPECT_TRUE(hasCublasConfig(configs))
<< "There is a cublas implementation for dot_bf16_bf16_f32 on Ampere";
break;
case se::CudaComputeCapability::HOPPER:
EXPECT_FALSE(hasCublasConfig(configs))
<< "There is no cublas implementation for dot_bf16_bf16_f32 on "
"Hopper. That is why we don't want to fallback to cublas.";
break;
default:
EXPECT_FALSE(hasCublasConfig(configs));
}
}
class GemmFusionAutotunerTest : public StatelessAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_cublas_fallback(false);
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(0);
return debug_options;
}
void CheckTritonAutotuning(absl::string_view hlo,
absl::string_view expected) {
HloPassPipeline pipeline("gemm_rewrite");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
RunAndFilecheckHloRewrite(
hlo, std::move(pipeline), expected, [](const HloModule* m) {
VLOG(5) << m->ToString();
const HloInstruction* dot_fusion =
m->entry_computation()->root_instruction();
if (dot_fusion->opcode() == HloOpcode::kReduce) {
dot_fusion = dot_fusion->operand(0);
}
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (!dot_fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_cudnn_fusion_config()) {
CHECK_GT(dot_fusion->backend_config<GpuBackendConfig>()
.value()
.fusion_backend_config()
.triton_gemm_config()
.block_m(),
0);
}
});
}
};
class GemmFusionAutotunerTestWithMorePreciseReduction
: public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
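// Builds a deviceless autotuner for the given compute capability and returns
// the Triton GEMM configs it would consider for `dot`.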
absl::StatusOr<std::vector<TritonGemmConfig>>
GetPossibleMatmulAutotuneTritonConfigs(
const HloDotInstruction& dot,
const se::CudaComputeCapability& compute_capability,
const se::SemanticVersion& toolkit_version,
const DebugOptions& debug_options) {
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DevicelessConfig test_config{se::DeviceDescription{deviceless_proto}};
AutotuneConfig autotune_config{test_config, debug_options};
GemmFusionAutotunerImpl autotuner(autotune_config, toolkit_version,
debug_options, nullptr);
return autotuner.GenerateTritonConfigs(dot);
}
TEST_F(GemmFusionAutotunerTest, AmpereUsesMoreThanTwoStages) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.num_stages > 2; }));
}
TEST_F(GemmFusionAutotunerTest, SmallOutputCanUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k >= 4; }));
}
TEST_F(GemmFusionAutotunerTest, LargeOutputDoesNotUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[20480,20480] parameter(0)
p1 = f32[20480,20480] parameter(1)
ROOT r = f32[20480,20480] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_FALSE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k > 1; }));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,64] parameter(0)
c = f16[128,64] convert(x)
y = f16[64,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{5e-3, 5e-3}));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm256) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,256] parameter(0)
c = f16[128,256] convert(x)
y = f16[256,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-2, 1e-2}));
}
TEST_F(GemmFusionAutotunerTest, SelectsSplitK) {
const std::string kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = f16[7,8192] convert(p0)
p1 = f16[8192,18] parameter(1)
ROOT dot.0 = f16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1, 0.5}));
}
TEST_F(GemmFusionAutotunerTestWithMorePreciseReduction, SelectsSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = f16[7,8192] convert(p0)
p1 = f16[8192,18] parameter(1)
ROOT dot.0 = f16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-2, 1e-3}));
}
TEST_F(GemmFusionAutotunerTest, ApplySplitKWithoutAlteringTiling) {
const std::string kHloText = R"(
triton_dot {
p0 = f16[55,120] parameter(0)
p1 = f16[120,20] parameter(1)
ROOT dot = f16[55,20] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[55,120]{1,0} parameter(0)
p1 = f16[120,20]{1,0} parameter(1)
ROOT _ = f16[55,20] fusion(p0, p1), kind=kCustom, calls=triton_dot,
backend_config={"fusion_backend_config":{kind: "__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}}}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: f16[3,55,20]
; CHECK: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}
; CHECK: f16[55,20]{1,0} {{(reduce|fusion)}}
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_F(GemmFusionAutotunerTest, DoNotRunAutotuningKernelSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"16","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
EXPECT_THAT(backend().compiler()->RunBackend(
std::move(module), backend().default_stream_executor(),
                  {/*device_allocator=*/nullptr,
                   /*thread_pool=*/nullptr,
                   /*layout_canonicalization_callback=*/{},
                   /*is_autotuning_compilation=*/true}),
::testing::AnyOf(
tsl::testing::StatusIs(
tsl::error::CANCELLED,
"Compilation result discarded due to register spilling"),
tsl::testing::StatusIs(
tsl::error::RESOURCE_EXHAUSTED,
::testing::HasSubstr("Register allocation failed")),
tsl::testing::StatusIs(
tsl::error::RESOURCE_EXHAUSTED,
::testing::HasSubstr("Insufficient registers"))));
}
TEST_F(GemmFusionAutotunerTest,
DoNotFilterOutAutotuningKernelSpillingRegisters) {
if (GetCudaComputeCapability().IsAtLeastHopper()) {
GTEST_SKIP() << "Hopper and newer runs out of registers for such HLOs";
}
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"16","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
HloModuleConfig config = module->config();
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
config.set_debug_options(debug_options);
module->set_config(config);
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/true})
.value();
EXPECT_NE(executable, nullptr);
}
TEST_F(GemmFusionAutotunerTest, RunAutotuningKernelNotSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = f16[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.10406 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
ROOT %dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %p1, f16[12288,1536]{1,0} %convert.10406), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY %e {
%p0 = s8[12288,1536]{1,0} parameter(0)
%p1 = f16[4,12288]{1,0} parameter(1)
ROOT %triton_dot = f16[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %p0, f16[4,12288]{1,0} %p1), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"16","block_n":"32","block_k":"16","split_k":"1","num_stages":"1","num_warps":"2","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
                       {/*device_allocator=*/nullptr,
                        /*thread_pool=*/nullptr,
                        /*layout_canonicalization_callback=*/{},
                        /*is_autotuning_compilation=*/true})
.value();
EXPECT_NE(executable, nullptr);
}
using GemmFusionAutotunerDumpTest = GemmFusionAutotunerTest;
TEST_F(GemmFusionAutotunerDumpTest, Fp8CublasltFallbackSupport) {
const std::string kHloText = R"(
HloModule o
gemm_fusion {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY main {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} fusion(p0, p1), kind=kCustom, calls=gemm_fusion, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
DebugOptions opts;
AutotuneConfig autotune_config{
DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts};
AutotuneCacheKey cache_key(autotune_config.GetModelStr(),
*module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(AutotuneResults autotune_results_override,
ParseTextProto<AutotuneResults>(R"pb(
version: 3
results {
device: "..."
hlo: "..."
result {
gemm { algorithm: -1 }
run_time { nanos: 14 }
}
})pb"));
autotune_results_override.mutable_results(0)->set_device(
std::string(cache_key.GetModelStr()));
autotune_results_override.mutable_results(0)->set_hlo(
std::string(cache_key.GetHlo()));
CHECK_OK(AutotunerUtil::LoadAutotuneResults(autotune_results_override));
HloPassPipeline pipeline("gemm_autotune");
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(autotune_config, GetToolkitVersion(),
&thread_pool, key_value_store);
pipeline.AddPass<CallInliner>();
for (GemmRewriterOptions::DType dtype :
{GemmRewriterOptions::DType::kFp8Only,
GemmRewriterOptions::DType::kNonFp8Only}) {
pipeline.AddPass<GemmRewriter>(autotune_config.GetGpuComputeCapability(),
GetToolkitVersion(),
GemmRewriterOptions{dtype});
}
TF_EXPECT_OK(HloTestBase::RunHloPass(&pipeline, module.get()));
const bool is_at_least_hopper =
std::holds_alternative<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability()) &&
std::get<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability())
.IsAtLeastHopper();
  // NOTE: the original FileCheck patterns here were lost together with the
  // stripped "//" comments; the strings below are a best-effort
  // reconstruction of the cuBLASLt-vs-cuBLAS fallback check.
  TF_ASSERT_OK_AND_ASSIGN(
      bool filecheck_matches,
      RunFileCheck(module->ToString(), is_at_least_hopper
                                           ? "// CHECK: __cublas$lt"
                                           : "// CHECK: __cublas$gemm"));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(GemmFusionAutotunerDumpTest, DumpingWorks) {
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
options.set_xla_gpu_cublas_fallback(true);
options.set_xla_gpu_dump_autotuned_gemm_fusions(true);
std::string output_directory;
if (!tsl::io::GetTestUndeclaredOutputsDir(&output_directory)) {
output_directory = tsl::testing::TmpDir();
}
options.set_xla_dump_to(output_directory);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion1 {
p0 = f32[333,333] parameter(0)
s = f32[333,333] sine(p0)
p1 = f32[333,333] parameter(1)
c = f32[333,333] cosine(p1)
ROOT dot = f32[333,333] dot(s, c),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[333,333] parameter(0)
p1 = f32[333,333] parameter(1)
ROOT rr = f32[333,333] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__triton_gemm"}}
})",
config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
std::string dump;
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(output_directory,
FilenameFor(*optimized_module, "",
"gemm_fusion_0.rr.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: cublas
CHECK: __triton_gemm
CHECK-NOT: block_m
)"));
dump.clear();
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
output_directory,
            FilenameFor(*optimized_module, /*prefix=*/"",
"gemm_fusion_0.rr.optimized.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: triton
CHECK: cublas
)"));
}
TEST_F(GemmFusionAutotunerTest, AutotuneCuDnnFusion) {
const std::string kHlo = R"(
fusion1 {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT d = f32[3,32,32] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT _ = f32[3,32,32] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
CheckTritonAutotuning(kHlo, R"(
)");
}
class GemmFusionAutotunerLevelTest : public StatelessAutotunerTest,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(GetParam());
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_P(GemmFusionAutotunerLevelTest, AllAutotuningLevelsWorkCorrectly) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = pred[64,10] parameter(0)
p0c = f32[64,10] convert(p0)
p1 = f32[10,128] parameter(1)
ROOT r = f32[64,128] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: kind=kCustom
; CHECK-SAME: block_m
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_P(GemmFusionAutotunerLevelTest, Deviceless) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
HloPassPipeline pipeline("gemm_rewrite_deviceless");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{
DevicelessConfig{
backend().default_stream_executor()->GetDeviceDescription()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
if (GetDebugOptionsForTest().xla_gpu_autotune_level() == 0) {
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pipeline, module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
R"(
)"));
EXPECT_TRUE(filecheck_matches);
} else {
EXPECT_THAT(HloTestBase::RunHloPass(&pipeline, module.get()),
tsl::testing::StatusIs(
tsl::error::INTERNAL,
::testing::HasSubstr(
"Expect autotune result cache hit for deviceless")));
}
}
INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerLevelSweep,
GemmFusionAutotunerLevelTest, ::testing::Range(0, 5));
class GemmFusionAutotunerExhaustiveTest : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(true);
return debug_options;
}
};
TEST_F(GemmFusionAutotunerExhaustiveTest, DISABLED_CompileOnly) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
}
TEST_F(GemmFusionAutotunerExhaustiveTest, SkipsCrashingTileKConfig) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY e {
x = s8[33,33]{1,0} parameter(0)
c = f16[33,33]{1,0} convert(x)
y = f16[33,33]{1,0} parameter(1)
ROOT out = f16[33,33]{1,0} dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::all_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.block_k > 16; }));
}
class GemmFusionAutotunerDisableSplitK : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_split_k_autotuning(false);
return debug_options;
}
};
TEST_F(GemmFusionAutotunerDisableSplitK, SplitKIsDisabled) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::all_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k == 1; }));
}
class GemmFusionAutotunerConfigTest
: public StatelessAutotunerTest,
public ::testing::WithParamInterface<bool> {};
TEST_P(GemmFusionAutotunerConfigTest, SparseDotDiscardsUnsupportedTiles) {
const std::string kHloText = R"(
HloModule test
ENTRY wais {
lhs = f16[5,1600] parameter(0)
rhs = f16[3200,10] parameter(1)
meta = u16[5,200] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(GetParam());
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), debug_options));
for (const auto& config : configs) {
int metadata_size = config.block_m * config.block_k / 16;
EXPECT_LE(config.num_warps * WarpSize(), metadata_size);
EXPECT_GT(config.block_k, 16);
}
}
INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerConfigSweep,
GemmFusionAutotunerConfigTest, ::testing::Bool());
TEST_F(StatelessAutotunerTest,
ExhaustiveAutotuningTunesNumberOfCtasFromHopper) {
const std::string kHloText = R"(
HloModule test
ENTRY main {
lhs = f32[5,1600] parameter(0)
rhs = f32[1600,10] parameter(1)
ROOT dot = f32[5,10] dot(lhs, rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
DebugOptions debug_options_with_exhaustive_autotuning =
GetDebugOptionsForTest();
debug_options_with_exhaustive_autotuning.set_xla_gpu_exhaustive_tiling_search(
true);
auto get_configs = [&](const se::CudaComputeCapability& cc,
const DebugOptions& debug_options) {
return GetPossibleMatmulAutotuneTritonConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
cc, GetToolkitVersion(), debug_options)
.value();
};
for (const auto& config :
get_configs(se::CudaComputeCapability::Ampere(),
debug_options_with_exhaustive_autotuning)) {
EXPECT_EQ(config.num_ctas, 1);
}
absl::flat_hash_set<int> config_num_ctas;
for (const auto& config :
get_configs(se::CudaComputeCapability::Hopper(),
debug_options_with_exhaustive_autotuning)) {
config_num_ctas.insert(config.num_ctas);
}
EXPECT_GT(config_num_ctas.size(), 1);
DebugOptions debug_options_without_exhaustive_autotuning =
GetDebugOptionsForTest();
debug_options_without_exhaustive_autotuning
.set_xla_gpu_exhaustive_tiling_search(false);
for (const auto& config :
get_configs(se::CudaComputeCapability::Hopper(),
debug_options_without_exhaustive_autotuning)) {
EXPECT_EQ(config.num_ctas, 1);
}
}
TEST_F(GemmFusionAutotunerTest, SplitKFLoatNormalization) {
if (!GetCudaComputeCapability().IsAtLeastHopper()) {
GTEST_SKIP() << "f8 types are only supported from Hopper onwards.";
}
const se::CudaComputeCapability compute_capability =
GetCudaComputeCapability();
se::GpuDeviceInfoProto deviceless_proto;
auto ccc = deviceless_proto.mutable_cuda_compute_capability();
ccc->set_major(compute_capability.major);
ccc->set_minor(compute_capability.minor);
DeviceConfig test_config{backend().default_stream_executor(),
backend().memory_allocator()};
AutotuneConfig autotune_config{test_config, GetDebugOptionsForTest()};
GemmFusionAutotunerImpl autotuner(autotune_config, GetToolkitVersion(),
GetDebugOptionsForTest(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(
auto compile_util,
      AutotunerCompileUtil::Create(autotune_config, GetDebugOptionsForTest()));
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
%gemm_fusion_dot_computation (parameter_0: f8e5m2[256,256], parameter_1: f8e4m3fn[128,256]) -> f8e5m2[256,128] {
%parameter_0 = f8e5m2[256,256]{1,0} parameter(0)
%parameter_1 = f8e4m3fn[128,256]{1,0} parameter(1)
%dot.1 = f32[256,128]{1,0} dot(f8e5m2[256,256]{1,0} %parameter_0, f8e4m3fn[128,256]{1,0} %parameter_1), lhs_contracting_dims={0}, rhs_contracting_dims={1}
ROOT %convert.2 = f8e5m2[256,128]{1,0} convert(f32[256,128]{1,0} %dot.1)
}
ENTRY entry {
%p0 = f8e5m2[256,256]{1,0} parameter(0)
%p1 = f8e4m3fn[128,256]{1,0} parameter(1)
ROOT r = f8e5m2[256,128]{1,0} fusion(f8e5m2[256,256]{1,0} %p0, f8e4m3fn[128,256]{1,0} %p1), kind=kCustom, calls=%gemm_fusion_dot_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})")
.value();
GemmFusionAutotunerImpl::BackendConfigs configs;
configs.emplace_back(
DynCast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
std::vector<GemmFusionAutotunerImpl::BackendConfig>{
GemmFusionAutotunerImpl::BackendConfig(TritonGemmConfig(
                /*block_m=*/32,
                /*block_n=*/64,
                /*block_k=*/64,
                /*split_k=*/4,
                /*num_stages=*/1,
                /*num_warps=*/4,
                /*num_ctas=*/1))});
CHECK_OK(autotuner.CompileAll(*compile_util, configs));
}
TEST_F(GemmFusionAutotunerTest, CreatesCustomKernelFusionConfigs) {
const std::string kHlo = R"(
HloModule module, entry_computation_layout={(bf16[1024,1024]{1,0}, bf16[1024,1024]{1,0})->f32[1024,1024]{1,0}}
%gemm_fusion_r_computation {
%parameter_0 = bf16[1024,1024]{1,0} parameter(0)
%convert.2 = f32[1024,1024]{1,0} convert(%parameter_0)
%parameter_1 = bf16[1024,1024]{1,0} parameter(1)
%convert.3 = f32[1024,1024]{1,0} convert(%parameter_1)
ROOT %r.1 = f32[1024,1024]{1,0} dot(%convert.2, %convert.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = bf16[1024,1024]{1,0} parameter(0)
%p1 = bf16[1024,1024]{1,0} parameter(1)
ROOT %gemm_fusion_r = f32[1024,1024]{1,0} fusion(%p0, %p1), kind=kCustom, calls=gemm_fusion_r_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest, GeneratesConfigForUpcastGemmWithPrologue) {
const std::string kHlo = R"(
HloModule module
%gemm_fusion_r_computation (parameter_0.1: f32[1,256,4,4096], parameter_1.1: bf16[1,4,4096,4096]) -> f32[256,4096] {
%parameter_0.1 = f32[1,256,4,4096]{3,2,1,0} parameter(0)
%bitcast.60 = f32[256,16384]{1,0} bitcast(f32[1,256,4,4096]{3,2,1,0} %parameter_0.1)
%parameter_1.1 = bf16[1,4,4096,4096]{3,2,1,0} parameter(1)
%bitcast.61 = bf16[16384,4096]{1,0} bitcast(bf16[1,4,4096,4096]{3,2,1,0} %parameter_1.1)
%convert.22 = f32[16384,4096]{1,0} convert(bf16[16384,4096]{1,0} %bitcast.61)
ROOT r = f32[256,4096]{1,0} dot(f32[256,16384]{1,0} %bitcast.60, f32[16384,4096]{1,0} %convert.22), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = f32[1,256,4,4096] parameter(0)
%p1 = bf16[1,4,4096,4096] parameter(1)
ROOT %gemm_fusion_r = f32[256,4096] fusion(%p0, %p1), kind=kCustom,
calls=gemm_fusion_r_computation,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest,
GeneratesConfigForUpcastGemmWithPrologueAndEpilogue) {
const std::string kHlo = R"(
HloModule module
%gemm_fusion_r_computation (parameter_0.1: f32[1,256,4,4096], parameter_1.1: bf16[1,4,4096,4096]) -> bf16[1048576] {
%parameter_0.1 = f32[1,256,4,4096]{3,2,1,0} parameter(0)
%bitcast.60 = f32[256,16384]{1,0} bitcast(f32[1,256,4,4096]{3,2,1,0} %parameter_0.1)
%parameter_1.1 = bf16[1,4,4096,4096]{3,2,1,0} parameter(1)
%bitcast.61 = bf16[16384,4096]{1,0} bitcast(bf16[1,4,4096,4096]{3,2,1,0} %parameter_1.1)
%convert.22 = f32[16384,4096]{1,0} convert(bf16[16384,4096]{1,0} %bitcast.61)
%dot.5 = f32[256,4096]{1,0} dot(f32[256,16384]{1,0} %bitcast.60, f32[16384,4096]{1,0} %convert.22), lhs_contracting_dims={1}, rhs_contracting_dims={0}
%convert.23 = bf16[256,4096]{1,0} convert(f32[256,4096]{1,0} %dot.5)
%bitcast.62 = bf16[1,256,4096]{2,1,0} bitcast(bf16[256,4096]{1,0} %convert.23)
%transpose.18 = bf16[1,4096,256]{2,1,0} transpose(bf16[1,256,4096]{2,1,0} %bitcast.62), dimensions={0,2,1}
ROOT %bitcast.63 = bf16[1048576]{0} bitcast(bf16[1,4096,256]{2,1,0} %transpose.18)
}
ENTRY main {
%p0 = f32[1,256,4,4096] parameter(0)
%p1 = bf16[1,4,4096,4096] parameter(1)
ROOT %gemm_fusion_r = bf16[1048576] fusion(%p0, %p1), kind=kCustom,
calls=gemm_fusion_r_computation,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<GemmFusionAutotunerImpl::BackendConfig> configs,
GetPossibleMatmulAutotuneConfigs(*module, compute_capability,
GetToolkitVersion(),
GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const GemmFusionAutotunerImpl::BackendConfig& config) {
return std::holds_alternative<
GemmFusionAutotunerImpl::CustomKernelFusionConfig>(config);
}));
}
TEST_F(GemmFusionAutotunerTest, RewritesGemmFusionToCustomKernelFusion) {
const std::string kHlo = R"(
HloModule module, entry_computation_layout={(bf16[1024,1024]{1,0}, bf16[1024,1024]{1,0})->f32[1024,1024]{1,0}}
%gemm_fusion_r_computation {
%parameter_0 = bf16[1024,1024]{1,0} parameter(0)
%convert.2 = f32[1024,1024]{1,0} convert(%parameter_0)
%parameter_1 = bf16[1024,1024]{1,0} parameter(1)
%convert.3 = f32[1024,1024]{1,0} convert(%parameter_1)
ROOT %r.1 = f32[1024,1024]{1,0} dot(%convert.2, %convert.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
%p0 = bf16[1024,1024]{1,0} parameter(0)
%p1 = bf16[1024,1024]{1,0} parameter(1)
ROOT %gemm_fusion_r = f32[1024,1024]{1,0} fusion(%p0, %p1), kind=kCustom, calls=gemm_fusion_r_computation, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
}
)";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(kHlo).value();
DebugOptions opts;
AutotuneConfig autotune_config{
DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts};
AutotuneCacheKey cache_key(autotune_config.GetModelStr(),
*module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(AutotuneResults autotune_results_override,
ParseTextProto<AutotuneResults>(R"pb(
version: 3
results {
device: "..."
hlo: "..."
result {
custom_kernel_fusion { kernel_index: 1 }
run_time { nanos: 14 }
}
})pb"));
autotune_results_override.mutable_results(0)->set_device(
std::string(cache_key.GetModelStr()));
autotune_results_override.mutable_results(0)->set_hlo(
std::string(cache_key.GetHlo()));
GemmFusionAutotunerRewriterVisitor visitor(autotune_config);
CHECK_OK(AutotunerUtil::LoadAutotuneResults(autotune_results_override));
visitor.RunOnModule(module.get(), {}).value();
std::string pattern = R"(
CHECK: ROOT %cutlass_gemm_with_upcast
CHECK-SAME: fusion
CHECK-SAME: kind=kCustom
CHECK-SAME: "kernel_index":1
)";
TF_ASSERT_OK_AND_ASSIGN(bool file_check_matches,
RunFileCheck(module->ToString(), pattern));
EXPECT_TRUE(file_check_matches);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_fusion_autotuner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d875bf1-7d72-49ea-bcaf-436820c6b061 | cpp | tensorflow/tensorflow | gemm_algorithm_picker | third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker.cc | third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker_test.cc | #include "xla/service/gpu/autotuning/gemm_algorithm_picker.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
namespace {
using se::gpu::BlasLt;
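// Translates the GemmBackendConfig epilogue enum into the corresponding
// cuBLASLt epilogue, or returns an internal error for unsupported values.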
absl::StatusOr<BlasLt::Epilogue> AsBlasLtEpilogue(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
return BlasLt::Epilogue::kDefault;
case GemmBackendConfig::RELU:
return BlasLt::Epilogue::kReLU;
case GemmBackendConfig::GELU:
return BlasLt::Epilogue::kGELU;
case GemmBackendConfig::GELU_AUX:
return BlasLt::Epilogue::kGELUWithAux;
case GemmBackendConfig::BIAS:
return BlasLt::Epilogue::kBias;
case GemmBackendConfig::BIAS_RELU:
return BlasLt::Epilogue::kBiasThenReLU;
case GemmBackendConfig::BIAS_GELU:
return BlasLt::Epilogue::kBiasThenGELU;
case GemmBackendConfig::BIAS_GELU_AUX:
return BlasLt::Epilogue::kBiasThenGELUWithAux;
default:
return Internal("Unsupported Epilogue.");
}
}
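// Benchmarks candidate GEMM algorithms (legacy cuBLAS or cuBLASLt, depending
// on the instruction) on redzone-guarded scratch buffers and returns the best
// measurement as an AutotuneResult.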
class GemmAutotuner {
const AutotuneConfig& autotune_config_;
RedzoneBuffers rz_buffers_;
se::Stream* stream_ = nullptr;
bool deterministic_ops_ = false;
size_t solutions_limit_ = 0;
size_t num_algorithms_left_ = 0;
public:
explicit GemmAutotuner(const AutotuneConfig& autotune_config)
: autotune_config_(autotune_config) {}
const AutotuneConfig& config() const { return autotune_config_; }
size_t num_algorithms_left() const { return num_algorithms_left_; }
absl::StatusOr<AutotuneResult> operator()(const HloInstruction* gemm,
const AutotuneCacheKey& key) {
num_algorithms_left_ = 0;
if (autotune_config_.IsDeviceless()) {
return AutotuneResult{};
}
VLOG(3) << "Starting autotune of GemmThunk " << gemm->ToString();
TF_ASSIGN_OR_RETURN(stream_, autotune_config_.GetStream());
const DebugOptions& debug_options =
gemm->GetModule()->config().debug_options();
deterministic_ops_ = RequireDeterminism(gemm->GetModule()->config());
solutions_limit_ = debug_options.xla_gpu_autotune_max_solutions();
TF_ASSIGN_OR_RETURN(auto gemm_config, GemmConfig::For(gemm));
absl::MutexLock gpu_lock(&GetGpuMutex(stream_->parent()));
TF_ASSIGN_OR_RETURN(rz_buffers_, RedzoneBuffers::FromInstruction(
*gemm, autotune_config_, debug_options,
RedzoneBuffers::kAllInputsAllOutputs));
return IsCublasLtMatmul(*gemm) || IsCublasLtMatmulF8(*gemm)
? TuneGpuBlasLt(gemm, gemm_config)
: TuneGpuBlas(gemm, gemm_config);
}
private:
se::DeviceMemoryBase LhsBuffer() { return rz_buffers_.input_buffers().at(0); }
se::DeviceMemoryBase RhsBuffer() { return rz_buffers_.input_buffers().at(1); }
se::DeviceMemoryBase OutputBuffer() {
return rz_buffers_.output_buffers().at(0);
}
const Shape& GetOutputShape(const HloInstruction* gemm) {
return gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0)
: gemm->shape();
}
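  // Enumerates cuBLASLt matmul algorithms for the instruction's plan
  // (including bias and auxiliary-output epilogues) and benchmarks each one.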
absl::StatusOr<AutotuneResult> TuneGpuBlasLt(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer =
rz_buffers_.output_buffers().at(gemm->shape().tuple_shapes_size() - 1);
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
const GemmBackendConfig& backend_config = gpu_config.gemm_backend_config();
bool has_matrix_bias = gemm_config.beta != 0.;
TF_ASSIGN_OR_RETURN(
bool has_vector_bias,
gpublas_lt::EpilogueAddsVectorBias(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(
bool has_aux_output,
gpublas_lt::EpilogueHasAuxiliaryOutput(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(auto epilogue,
AsBlasLtEpilogue(backend_config.epilogue()));
se::DeviceMemoryBase a_scale_buffer, b_scale_buffer, c_scale_buffer,
d_scale_buffer, d_amax_buffer, bias_buffer, aux_buffer;
if (has_vector_bias) {
bias_buffer = rz_buffers_.input_buffers().at(has_matrix_bias ? 3 : 2);
}
if (has_aux_output) {
aux_buffer = rz_buffers_.output_buffers().at(1);
}
TF_ASSIGN_OR_RETURN(auto plan,
BlasLt::GetMatmulPlan(stream_, gemm_config, epilogue));
TF_ASSIGN_OR_RETURN(
auto algorithms,
        plan->GetAlgorithms(/*max_algorithm_count=*/128,
workspace_buffer.size()));
auto tuned_func = [&](const BlasLt::MatmulAlgorithm& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer, &profile_result));
return std::move(profile_result);
};
return GetBestAlgorithm<BlasLt::MatmulAlgorithm>(
        gemm, algorithms, gemm_config.beta, /*return_algo_index=*/true,
tuned_func);
}
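  // Queries the legacy cuBLAS GEMM algorithm list for the matrix descriptors
  // and benchmarks each algorithm.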
absl::StatusOr<AutotuneResult> TuneGpuBlas(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer = rz_buffers_.output_buffers().at(1);
std::vector<se::blas::AlgorithmType> algorithms;
TF_ASSIGN_OR_RETURN(GemmConfig::DescriptorsTuple desc,
gemm_config.GetMatrixDescriptors(
LhsBuffer(), RhsBuffer(), OutputBuffer()));
auto blas = stream_->parent()->AsBlas();
if (blas == nullptr) {
return absl::InternalError("No BLAS support for stream");
}
blas->GetBlasGemmAlgorithms(stream_, desc.lhs, desc.rhs, &desc.output,
&gemm_config.alpha, &gemm_config.beta,
&algorithms);
auto tuned_func = [&](const se::blas::AlgorithmType& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
static_cast<void>(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm,
&profile_result));
return std::move(profile_result);
};
return GetBestAlgorithm<se::blas::AlgorithmType>(
gemm, algorithms, gemm_config.beta, /*return_algo_index=*/false, tuned_func);
}
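// Profiles every candidate algorithm with `run_benchmark`, optionally
// re-initializing the output buffer, checking redzones, and comparing outputs
// against the first successful algorithm, then returns the best measurement.
// If `return_algo_index` is set, the returned result stores the index of the
// winning algorithm instead of its opaque BLAS algorithm id.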
template <typename AlgoT, typename TunedFunc>
absl::StatusOr<AutotuneResult> GetBestAlgorithm(
const HloInstruction* gemm, absl::Span<const AlgoT> algorithms,
double beta, bool return_algo_index, TunedFunc&& run_benchmark) {
static_assert(std::is_invocable_r_v<absl::StatusOr<se::blas::ProfileResult>,
TunedFunc, const AlgoT&>,
"Tuned function has incorrect prototype!");
if (!stream_->parent()->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaAutotunerMeasurement:#hlo_op=%s#",
gemm->name());
});
auto& hlo_module_config = gemm->GetModule()->mutable_config();
const auto& output_shape = GetOutputShape(gemm);
se::DeviceMemoryBase reference_buffer;
if (autotune_config_.should_check_correctness()) {
TF_ASSIGN_OR_RETURN(reference_buffer,
rz_buffers_.RedzoneAllocator().AllocateBytes(
ShapeUtil::ByteSizeOf(output_shape)));
}
BufferComparator comparator(
output_shape,
hlo_module_config.debug_options().xla_gpu_autotune_gemm_rtol(),
!autotune_config_.should_skip_wrong_results());
std::vector<AutotuneResult> results;
results.reserve(algorithms.size());
std::optional<int64_t> reference_algorithm;
auto num = algorithms.size();
if (solutions_limit_ > 0) num = std::min(num, solutions_limit_);
for (size_t i = 0; i < num; i++) {
const AlgoT& algorithm = algorithms[i];
if (autotune_config_.should_reinit_output_buffer() && beta != 0) {
int64_t rng_state = 0;
InitializeBuffer(stream_, output_shape.element_type(), &rng_state,
OutputBuffer());
}
TF_ASSIGN_OR_RETURN(auto profile_result, run_benchmark(algorithm));
AutotuneResult& result = results.emplace_back();
result.mutable_gemm()->set_algorithm(profile_result.algorithm());
if (!profile_result.is_valid()) {
result.mutable_failure()->set_kind(AutotuneResult::DISQUALIFIED);
continue;
}
VLOG(2) << "gemm algorithm " << profile_result.algorithm() << " took "
<< profile_result.elapsed_time_in_ms() << "ms";
*result.mutable_run_time() = tsl::proto_utils::ToDurationProto(
absl::Milliseconds(profile_result.elapsed_time_in_ms()));
if (!autotune_config_.should_check_correctness()) {
num_algorithms_left_++;
continue;
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator::RedzoneCheckStatus rz_check_status,
rz_buffers_.RedzoneAllocator().CheckRedzones());
if (!rz_check_status.ok()) {
result.mutable_failure()->set_kind(AutotuneResult::REDZONE_MODIFIED);
*result.mutable_failure()->mutable_msg() =
rz_check_status.RedzoneFailureMsg();
LOG(ERROR) << "Detected out-of-bounds write in gemm buffer";
CHECK(!autotune_config_.should_crash_on_check_failure());
continue;
}
num_algorithms_left_++;
if (!reference_algorithm) {
TF_RETURN_IF_ERROR(stream_->Memcpy(&reference_buffer, OutputBuffer(),
OutputBuffer().size()));
reference_algorithm = profile_result.algorithm();
continue;
}
TF_ASSIGN_OR_RETURN(
bool outputs_match,
comparator.CompareEqual(stream_, OutputBuffer(),
reference_buffer));
if (!outputs_match) {
LOG(ERROR) << "Results mismatch between different GEMM algorithms. "
<< "This is likely a bug/unexpected loss of precision.";
CHECK(!autotune_config_.should_crash_on_check_failure());
auto kind = AutotuneResult::WRONG_RESULT;
if (autotune_config_.should_skip_wrong_results()) {
kind = AutotuneResult::DISQUALIFIED;
num_algorithms_left_--;
}
result.mutable_failure()->set_kind(kind);
result.mutable_failure()->mutable_reference_gemm()->set_algorithm(
*reference_algorithm);
}
}
absl::StatusOr<AutotuneResult> best =
PickBestResult(results, gemm->ToString(), hlo_module_config);
if (best.ok()) {
if (!return_algo_index) return best;
for (size_t i = 0; i < results.size(); ++i) {
if (best->gemm().algorithm() == results[i].gemm().algorithm()) {
best->mutable_gemm()->set_algorithm(i);
return best;
}
}
return Internal("unknown best algorithm");
}
LOG(WARNING) << "Failed to find best cuBLAS algorithm, GEMM performance "
"might be suboptimal: "
<< best.status();
return AutotuneResult{};
}
};
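// Autotunes a single cuBLAS GEMM custom call and, where applicable, writes the
// selected algorithm back into its backend config. Returns true if the config
// changed.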
absl::StatusOr<bool> RunOnInstruction(HloInstruction* gemm,
GemmAutotuner& autotuner) {
VLOG(3) << "Loading the autotune result of GemmThunk " << gemm->ToString();
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
GemmBackendConfig& backend_config = *gpu_config.mutable_gemm_backend_config();
if (backend_config.alpha_real() == 0.0 &&
backend_config.alpha_imag() == 0.0 && backend_config.beta() == 0.0) {
VLOG(3) << "Skip degenerate gemm instruction auto tuning";
return false;
}
const AutotuneConfig& config = autotuner.config();
AutotuneCacheKey key(config.GetModelStr(), *gemm);
TF_ASSIGN_OR_RETURN(AutotuneResult algorithm,
AutotunerUtil::Autotune(
gemm, config, [&] { return autotuner(gemm, key); }));
auto old_algorithm = backend_config.selected_algorithm();
bool update_algorithm =
IsCublasLtMatmulF8(*gemm) ||
std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return !cc.IsAtLeast(
se::CudaComputeCapability::AMPERE);
},
[](const se::RocmComputeCapability&) {
return true;
}},
config.GetGpuComputeCapability());
if (update_algorithm) {
int64_t new_algorithm{};
if (algorithm.has_gemm()) {
new_algorithm = algorithm.gemm().algorithm();
} else {
new_algorithm = se::blas::kDefaultAlgorithm;
}
if (new_algorithm == old_algorithm &&
backend_config.has_selected_algorithm()) {
return false;
}
backend_config.set_selected_algorithm(new_algorithm);
TF_RETURN_IF_ERROR(gemm->set_backend_config(gpu_config));
return true;
}
return false;
}
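// Autotunes every cuBLAS GEMM custom call in the computation and records the
// maximum number of algorithms that survived the checks across instructions.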
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
GemmAutotuner& autotuner,
size_t* num_algorithms_left) {
bool changed = false;
for (HloInstruction* instr : computation->instructions()) {
if (IsCublasGemm(*instr)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(instr, autotuner));
*num_algorithms_left =
std::max(*num_algorithms_left, autotuner.num_algorithms_left());
changed |= result;
}
}
return changed;
}
}
absl::StatusOr<bool> GemmAlgorithmPicker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER(
absl::StrCat("GemmAlgorithmPicker for ", module->name()));
num_algorithms_left_ = 0;
if (module->config().debug_options().xla_gpu_autotune_level() == 0) {
VLOG(2) << "GEMM auto-tuning disabled, GemmAlgorithmPicker returning early";
return false;
}
GemmAutotuner autotuner(config_);
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, autotuner,
&num_algorithms_left_));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/autotuning/gemm_algorithm_picker.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <variant>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GemmAlgorithmPickerTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
GemmAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(GetParam());
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
se::StreamExecutor* stream_exec() {
return backend().default_stream_executor();
}
const se::DeviceDescription& device_desc() {
return stream_exec()->GetDeviceDescription();
}
const se::GpuComputeCapability& gpu_comp() {
return device_desc().gpu_compute_capability();
}
void SetUp() override {
std::string_view name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
bool blas_get_version = name.rfind("BlasGetVersion") == 0;
std::visit(
VariantVisitor{
[&](const se::CudaComputeCapability& cc) {
if (!blas_get_version && cc.IsAtLeastAmpere()) {
GTEST_SKIP()
<< "Skipping this test for Ampere+ as it is supported "
"and recommended with the Nvidia Volta+ GPUs.";
}
},
[&](const se::RocmComputeCapability& cc) {
if (blas_get_version) {
if (device_desc().runtime_version() <
stream_executor::SemanticVersion{6, 2, 0}) {
GTEST_SKIP()
<< "This API is not available on ROCM 6.1 and below.";
}
} else if (GetDebugOptionsForTest().xla_gpu_enable_cublaslt() &&
!cc.has_hipblaslt()) {
GTEST_SKIP() << "No gpublas-lt support on this architecture!";
}
}},
gpu_comp());
}
};
TEST_P(GemmAlgorithmPickerTest, BlasGetVersion) {
auto* blas = stream_exec()->AsBlas();
ASSERT_TRUE(blas != nullptr);
std::string version;
ASSERT_TRUE(blas->GetVersion(&version).ok());
VLOG(0) << "Blas version: " << version;
ASSERT_TRUE(!version.empty());
}
TEST_P(GemmAlgorithmPickerTest, SkipAlgorithmsWithAccuracyCheck) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
auto module_cfg = GetModuleConfigForTest();
auto debug_opts = module_cfg.debug_options();
size_t num_left1 = 0, num_left2 = 0;
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHlo, module_cfg));
{
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
module.get()));
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, debug_opts};
GemmAlgorithmPicker gpicker(cfg);
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(gpicker, module.get()));
num_left1 = gpicker.num_algorithms_left();
if (num_left1 < 2) {
GTEST_SKIP() << "Too few algorithms left after the first step";
}
auto* blas = stream_exec()->AsBlas();
ASSERT_TRUE(blas != nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool is_main_stream, blas->IsMainStreamSet());
if (std::holds_alternative<se::RocmComputeCapability>(gpu_comp())) {
ASSERT_TRUE(is_main_stream);
}
}
AutotunerUtil::ClearAutotuneResults();
{
debug_opts.set_xla_gpu_autotune_gemm_rtol(1e-12);
debug_opts.set_xla_gpu_autotune_level(5);
module->mutable_config().set_debug_options(debug_opts);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
module.get()));
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, debug_opts};
GemmAlgorithmPicker gpicker(cfg);
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(gpicker, module.get()));
num_left2 = gpicker.num_algorithms_left();
}
ASSERT_TRUE(num_left1 > num_left2);
}
TEST_P(GemmAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kHlo, module_cfg));
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(gpu_comp(),
se::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
TEST_P(GemmAlgorithmPickerTest, GetAlgorithmWithoutDevice) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(kHlo, GetModuleConfigForTest()));
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
DevicelessConfig deviceless_config{device_desc()};
AutotuneConfig deviceless_cfg{deviceless_config, opts};
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed, RunHloPass(GemmAlgorithmPicker(deviceless_cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
INSTANTIATE_TEST_SUITE_P(GemmAlgorithmPickerTestSuite, GemmAlgorithmPickerTest,
::testing::Bool());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ef25b3e-ad9d-4e72-9756-a70137f03a02 | cpp | tensorflow/tensorflow | autotuner_util | third_party/xla/xla/service/gpu/autotuning/autotuner_util.cc | third_party/xla/xla/service/gpu/autotuning/autotuner_util_test.cc | #include "xla/service/gpu/autotuning/autotuner_util.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA256.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr int kVersion = 3;
}
using AutotuneCacheMap = absl::flat_hash_map<AutotuneCacheKey, AutotuneResult>;
static absl::Mutex autotune_cache_mu(absl::kConstInit);
static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) =
*new AutotuneCacheMap();
static AutotunerUtil::CacheStats autotune_cache_stats
ABSL_GUARDED_BY(autotune_cache_mu);
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s) {
llvm::SHA256 sha256;
sha256.update(llvm::StringRef(s));
std::array<uint8_t, 32> hash = sha256.final();
absl::string_view hash_view(reinterpret_cast<const char*>(hash.data()),
hash.size());
std::string base64_encoded_hash;
TF_RETURN_IF_ERROR(tsl::Base64Encode(hash_view, &base64_encoded_hash));
return base64_encoded_hash;
}
namespace {
absl::StatusOr<std::string> GetCacheFilePath(absl::string_view cache_dir,
absl::string_view key_hash) {
if (cache_dir.empty()) {
return absl::InvalidArgumentError("autotune_cache_dir should not be empty");
}
return tsl::io::JoinPath(cache_dir, absl::StrCat(key_hash, ".textproto"));
}
struct ResultAndInserted {
AutotuneResult result;
bool inserted;
};
ResultAndInserted AddResultToInMemoryCache(const AutotuneCacheKey& key,
AutotuneResult result)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto [it, inserted] = autotune_cache.emplace(key, std::move(result));
return {it->second, inserted};
}
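// Serializes the result as a textproto into <cache_dir>/<key_hash>.textproto,
// writing through a temporary file and renaming it so readers never observe a
// partially written entry. No-op if the cache dir is unset or the cache is in
// read-only mode.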
absl::Status AddResultToFileBasedCacheIfEnabled(
const AutotuneCacheKey& key, AutotuneResult result,
std::string_view cache_dir,
DebugOptions::AutotuneCacheMode autotune_cache_mode)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty() ||
autotune_cache_mode == DebugOptions::AUTOTUNE_CACHE_MODE_READ) {
return absl::OkStatus();
}
tsl::Env* default_env = tsl::Env::Default();
TF_RETURN_IF_ERROR(CreateDirIfNeeded(std::string(cache_dir), default_env));
TF_ASSIGN_OR_RETURN(std::string key_hash,
GetBase64EncodedSha256Hash(key.ToString()));
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key_hash));
VLOG(1) << "Writing autotune result to file: " << file_path;
std::string result_str;
if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) {
return absl::InternalError("Failed to serialize autotune result.");
}
std::string tmp_dir = tsl::io::JoinPath(cache_dir, "tmp");
TF_RETURN_IF_ERROR(CreateDirIfNeeded(tmp_dir, default_env));
int64_t time_stamp = absl::GetCurrentTimeNanos();
std::string temp_file_path = tsl::io::JoinPath(
tmp_dir, absl::StrCat("tmp_per_fusion_cache_", key_hash, "_",
std::to_string(time_stamp), ".textproto"));
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(default_env, temp_file_path, result_str));
return default_env->RenameFile(temp_file_path, file_path);
}
absl::StatusOr<ResultAndInserted> AddResultToCaches(
const AutotuneCacheKey& key, AutotuneResult result,
std::string_view cache_dir,
DebugOptions::AutotuneCacheMode autotune_cache_mode)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
ResultAndInserted result_and_inserted = AddResultToInMemoryCache(key, result);
if (result_and_inserted.inserted) {
TF_RETURN_IF_ERROR(AddResultToFileBasedCacheIfEnabled(
key, result_and_inserted.result, cache_dir, autotune_cache_mode));
}
return result_and_inserted;
}
std::optional<AutotuneResult> TryToFindInInMemoryCache(
const AutotuneCacheKey& key) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto it = autotune_cache.find(key);
if (it == autotune_cache.end()) {
return std::nullopt;
}
return it->second;
}
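// Looks up the on-disk cache entry whose filename is the base64-encoded
// SHA-256 hash of the cache key; returns nullopt if file-based caching is
// disabled or the file does not exist.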
absl::StatusOr<std::optional<AutotuneResult>>
TryToFindInFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return std::nullopt;
}
TF_ASSIGN_OR_RETURN(std::string key_hash,
GetBase64EncodedSha256Hash(key.ToString()));
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key_hash));
if (!tsl::Env::Default()->FileExists(file_path).ok()) {
VLOG(1) << "Autotune result file not found: " << file_path;
return std::nullopt;
}
VLOG(1) << "Autotune result file found: " << file_path;
std::string autotune_result_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), file_path,
&autotune_result_str));
AutotuneResult result;
if (!tsl::protobuf::TextFormat::ParseFromString(autotune_result_str,
&result)) {
return absl::InvalidArgumentError("Failed to parse autotune result.");
}
return result;
}
void SortAutotuneResults(AutotuneResults* results) {
std::sort(results->mutable_results()->pointer_begin(),
results->mutable_results()->pointer_end(),
[](const auto* a, const auto* b) {
return std::make_pair(absl::string_view(a->device()),
absl::string_view(a->hlo())) <
std::make_pair(absl::string_view(b->device()),
absl::string_view(b->hlo()));
});
}
}
absl::StatusOr<std::string> AutotuneResultsToString(
const AutotuneResults& results, bool as_textproto) {
if (as_textproto) {
std::string textproto;
if (tsl::protobuf::TextFormat::PrintToString(results, &textproto)) {
return textproto;
} else {
return Internal("Failed to serialize autotune results.");
}
}
return results.SerializeAsString();
}
namespace {
void SerializeAutotuneEntry(AutotuneResults* results, const AutotuneCacheKey& k,
const AutotuneResult* res) {
auto& entry = *results->add_results();
entry.set_device(std::string(k.GetModelStr()));
entry.set_hlo(std::string(k.GetHlo()));
*entry.mutable_result() = *res;
}
}
absl::Status AutotunerUtil::SerializeAutotuneResults(
AutotuneResults* results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const auto& [k, result] : autotune_cache) {
SerializeAutotuneEntry(results, k, &result);
}
results->set_version(kVersion);
SortAutotuneResults(results);
return absl::OkStatus();
}
absl::Status AutotunerUtil::LoadAutotuneResults(
const AutotuneResults& results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const AutotuneResults::Entry& result : results.results()) {
if (auto [it, inserted] = autotune_cache.emplace(
AutotuneCacheKey(result.device(), result.hlo()), result.result());
!inserted) {
return absl::InternalError(absl::StrCat(
"Duplicate autotuning result for ", it->first.ToString()));
}
}
return absl::OkStatus();
}
void AutotunerUtil::ClearAutotuneResults() {
absl::MutexLock lock(&autotune_cache_mu);
autotune_cache.clear();
}
bool AutotunerUtil::ResultCacheIsEmpty() {
absl::MutexLock lock(&autotune_cache_mu);
return autotune_cache.empty();
}
absl::StatusOr<se::DeviceMemoryBase> AutotunerUtil::CreateBuffer(
se::RedzoneAllocator& allocator, const Shape& shape,
const AutotuneConfig& config, int64_t& rng_state) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase buffer,
allocator.AllocateBytes(ShapeUtil::ByteSizeOf(shape)));
if (config.should_init_buffers()) {
InitializeBuffer(allocator.stream(), shape.element_type(), &rng_state,
buffer);
}
return buffer;
}
namespace {
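// Canonical HLO string used in cache keys. For fusions, the called computation
// is printed with canonicalized names and without ids instead of the fusion
// instruction itself.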
std::string ToCanonicalString(const HloInstruction* instr) {
auto options = HloPrintOptions::Canonical();
if (instr->opcode() != HloOpcode::kFusion) {
options.set_print_backend_config(true);
options.set_sort_backend_config(true);
return instr->ToString(options);
}
options.set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff);
options.set_print_infeed_outfeed_config(false);
options.set_print_only_essential_constants(true);
options.set_print_operand_shape(true);
options.set_print_ids(false);
options.set_canonicalize_computations(true);
return instr->called_computations()[0]->ToString(options);
}
}
AutotuneCacheKey::AutotuneCacheKey(absl::string_view model_str,
const HloInstruction& instr)
: AutotuneCacheKey(model_str, ToCanonicalString(&instr)) {}
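// Builds the human-readable device portion of the cache key from the compute
// capability, core count, clock rate, memory bandwidth (GB/s, rounded), and L2
// cache size (MB).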
std::string AutotuneCacheKey::DeviceDescriptionToCacheKey(
const se::DeviceDescription& device_description) {
std::string compute_capability;
if (auto* ccc = std::get_if<se::CudaComputeCapability>(
&device_description.gpu_compute_capability())) {
compute_capability = absl::StrCat("CUDA: ", ccc->major, ".", ccc->minor);
} else {
auto* rcc = std::get_if<se::RocmComputeCapability>(
&device_description.gpu_compute_capability());
CHECK(rcc != nullptr) << "Unknown compute capability type";
compute_capability = absl::StrCat("ROCM: ", rcc->gfx_version());
}
double memory_bandwidth = device_description.memory_bandwidth() / 1e9;
memory_bandwidth = std::round(memory_bandwidth);
constexpr double kBytesPerMegabyte = 1 << 20;
double l2_cache_size = device_description.l2_cache_size() / kBytesPerMegabyte;
return absl::StrCat(compute_capability,
", Cores: ", device_description.core_count(),
", GPU clock: ", device_description.clock_rate_ghz(),
" GHz, Memory bandwidth: ", memory_bandwidth,
" GB/s, L2 cache: ", l2_cache_size, " MB");
}
namespace {
enum class CacheType { kNone, kInMemory, kOnDisk };
absl::StatusOr<std::pair<CacheType, std::optional<AutotuneResult>>>
TryFindInAllCacheTypes(const AutotuneCacheKey& key, absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
std::optional<AutotuneResult> opt_result = TryToFindInInMemoryCache(key);
if (opt_result.has_value()) {
return std::make_pair(CacheType::kInMemory, opt_result);
}
TF_ASSIGN_OR_RETURN(opt_result,
TryToFindInFileBasedCacheIfEnabled(key, cache_dir));
if (opt_result.has_value()) {
AddResultToInMemoryCache(key, opt_result.value());
return std::make_pair(CacheType::kOnDisk, opt_result);
}
return std::make_pair(CacheType::kNone, std::nullopt);
}
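// Looks up the key in the in-memory cache first and then in the file-based
// cache, logging the kind of hit or miss and updating the global cache
// statistics.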
absl::StatusOr<std::optional<AutotuneResult>> TryFindInCache(
const AutotuneCacheKey& key, absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
TF_ASSIGN_OR_RETURN(auto cached, TryFindInAllCacheTypes(key, cache_dir));
if (VLOG_IS_ON(1)) {
std::string logged_key =
(VLOG_IS_ON(2)) ? absl::StrCat(": key = ", key.ToString()) : "";
switch (cached.first) {
case CacheType::kNone:
LOG(INFO) << "Autotune cache miss" << logged_key;
break;
case CacheType::kInMemory:
LOG(INFO) << "In-memory autotune cache hit" << logged_key;
break;
case CacheType::kOnDisk:
LOG(INFO) << "File-based autotune cache hit" << logged_key;
break;
}
}
{
auto cache_hit = cached.second.has_value();
absl::MutexLock lock(&autotune_cache_mu);
autotune_cache_stats.cache_hits += cache_hit ? 1 : 0;
autotune_cache_stats.cache_misses += cache_hit ? 0 : 1;
}
return std::move(cached.second);
}
}
AutotuneCacheKey AutotunerUtil::GetKey(
const HloInstruction* instr, const AutotuneConfig& config) {
return AutotuneCacheKey(config.GetModelStr(), *instr);
}
absl::StatusOr<bool> AutotunerUtil::IsInCache(
const AutotuneCacheKey& key, const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
return opt_res.has_value();
}
absl::StatusOr<bool> AutotunerUtil::AddResult(
const AutotuneCacheKey& key, AutotuneResult result,
const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(
ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(result), config.autotune_cache_dir(),
config.autotune_cache_mode()));
return result_and_inserted.inserted;
}
absl::StatusOr<AutotuneResult> AutotunerUtil::Autotune(
const HloInstruction* instr, const AutotuneConfig& config,
const AutotuneNoCacheFn& autotune_fn) {
const AutotuneCacheKey key = GetKey(instr, config);
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
if (opt_res.has_value()) {
return opt_res.value();
}
if (config.should_require_complete_aot_autotune_results()) {
return NotFound(
"Complete XLA AOT autotuning results are required, but no AOT result "
"was found for key: %s",
key.ToString());
}
TF_ASSIGN_OR_RETURN(AutotuneResult autotune_result, autotune_fn());
TF_ASSIGN_OR_RETURN(ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(autotune_result),
config.autotune_cache_dir(),
config.autotune_cache_mode()));
return result_and_inserted.result;
}
namespace {
bool IsTextProtoPath(absl::string_view file_path) {
return absl::EndsWith(file_path, ".txt") ||
absl::EndsWith(file_path, ".textproto") ||
absl::EndsWith(file_path, ".prototxt") ||
absl::EndsWith(file_path, ".pbtxt");
}
}
absl::Status AutotunerUtil::LoadAutotuneResults(
absl::string_view data, bool as_textproto) {
AutotuneResults results;
bool parse_success =
as_textproto ? tsl::protobuf::TextFormat::ParseFromString(
std::string(data), &results)
: results.ParseFromString(std::string(data));
if (!parse_success) {
return absl::InvalidArgumentError(
"Failed to parse autotune results string.");
}
if (results.version() != kVersion) {
return absl::InvalidArgumentError(absl::StrFormat(
"Version mismatch in autotune results. Expected %d but was %d",
kVersion, results.version()));
}
TF_RETURN_IF_ERROR(LoadAutotuneResults(results));
return absl::OkStatus();
}
absl::StatusOr<std::string> AutotunerUtil::SerializeAutotuneResults(
bool as_textproto) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return AutotuneResultsToString(results, as_textproto);
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
const AutotuneResults& results, absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
TF_RET_CHECK(results.version() > 0)
<< "Did you call SerializeAutotuneResults to get this AutotuneResults?";
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
TF_ASSIGN_OR_RETURN(
std::string autotune_results_str,
AutotuneResultsToString(results, IsTextProtoPath(resolved_path)));
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), resolved_path,
autotune_results_str));
LOG(INFO) << "Autotune results serialized to file: " << resolved_path;
return absl::OkStatus();
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
absl::string_view file_path) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return SerializeAutotuneResultsToFile(results, file_path);
}
absl::Status AutotunerUtil::LoadAutotuneResultsFromFile(
absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
if (!tsl::Env::Default()->FileExists(resolved_path).ok()) {
return FailedPrecondition("Autotune results file does not exist: %s",
resolved_path);
}
std::string autotune_results_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), resolved_path,
&autotune_results_str));
TF_RETURN_IF_ERROR(LoadAutotuneResults(autotune_results_str,
IsTextProtoPath(resolved_path)));
LOG(INFO) << "Autotune results loaded from file: " << resolved_path;
return absl::OkStatus();
}
absl::StatusOr<se::RedzoneAllocator>
AutotunerUtil::CreateRedzoneAllocator(const AutotuneConfig& config,
const DebugOptions& opts) {
TF_ASSIGN_OR_RETURN(se::Stream * stream, config.GetStream());
return se::RedzoneAllocator(
stream, config.GetAllocator(), PtxOptsFromDebugOptions(opts),
std::numeric_limits<int64_t>::max(),
config.should_check_correctness()
? opts.xla_gpu_redzone_padding_bytes()
: 0);
}
AutotunerUtil::CacheStats AutotunerUtil::GetCacheStats() {
absl::MutexLock lock(&autotune_cache_mu);
return autotune_cache_stats;
}
void AutotunerUtil::ClearCacheStats() {
absl::MutexLock lock(&autotune_cache_mu);
autotune_cache_stats = CacheStats();
}
}
} | #include "xla/service/gpu/autotuning/autotuner_util.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/dump.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::StatusIs;
class AutotunerUtilTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
static constexpr absl::string_view kResultText = R"(
version: 3
results {
device: "CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: 1555 GB/s, L2 cache: 40 MB"
hlo: "{\n tmp_0 = f16[1,16,17,3]{3,2,1,0} parameter(0)\n tmp_1 = f16[16,51]{1,0} bitcast(f16[1,16,17,3]{3,2,1,0} tmp_0)\n tmp_2 = s8[16,17,3]{2,1,0} parameter(1)\n tmp_3 = s8[51,16]{0,1} bitcast(s8[16,17,3]{2,1,0} tmp_2)\n tmp_4 = f16[51,16]{0,1} convert(s8[51,16]{0,1} tmp_3)\n tmp_5 = f16[16,16]{1,0} dot(f16[16,51]{1,0} tmp_1, f16[51,16]{0,1} tmp_4), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tmp_6 = f16[1,16,16]{2,1,0} bitcast(f16[16,16]{1,0} tmp_5)\n}"
result {
run_time {
nanos: 31744
}
triton {
block_m: 32
block_n: 32
block_k: 32
split_k: 1
num_stages: 1
num_warps: 4
num_ctas: 1
}
}
})";
void SetUp() override {
AutotunerUtil::ClearAutotuneResults();
AutotunerUtil::ClearCacheStats();
}
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
static stream_executor::StreamExecutor* NewStreamExecutor() {
stream_executor::Platform* platform =
stream_executor::PlatformManager::PlatformWithName("Host").value();
return platform->ExecutorForDevice(0).value();
}
absl::Status PopulateResultCache() {
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_RETURN_IF_ERROR(AutotunerUtil::LoadAutotuneResults(kResultText, true));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
return absl::OkStatus();
}
};
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
EXPECT_GT(results.results_size(), 0);
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(results.ParseFromString(autotune_results_str));
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, ResultConflictsAreDetected) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
EXPECT_THAT(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Duplicate autotuning result")));
}
TEST_F(AutotunerUtilTest, FailIfRequireCompleteAotAutotuning) {
std::string kFilePath = GetUniqueTempFilePath(".txt");
auto hlo_module = GetOptimizedModule(kHloText);
TF_EXPECT_OK(hlo_module.status());
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
stream_executor::StreamExecutor* executor = NewStreamExecutor();
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor}, options);
EXPECT_THAT(
AutotunerUtil::Autotune(instruction, config,
[&] { return AutotuneResult(); }),
StatusIs(
absl::StatusCode::kNotFound,
HasSubstr("Complete XLA AOT autotuning results are required, but "
"no AOT result was found for key: <key model")));
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1);
}
TEST_F(AutotunerUtilTest, OkIfJitAutotuningDisabledButAlreadyLoadedAOT) {
auto hlo_module = GetOptimizedModule(kHloText);
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
stream_executor::StreamExecutor* executor = NewStreamExecutor();
{
AutotuneConfig config(DeviceConfig{executor}, DebugOptions());
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1);
}
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor}, options);
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1);
}
class FileBasedCacheTest : public AutotunerUtilTest {
public:
static std::string ToString(const AutotuneResult& message) {
std::string textproto;
CHECK(tsl::protobuf::TextFormat::PrintToString(message, &textproto));
return textproto;
}
static std::vector<std::string> GetFilesInDir(
const absl::string_view cache_dir) {
std::vector<std::string> files_in_cache;
if (!tsl::Env::Default()
->GetChildren(std::string(cache_dir), &files_in_cache)
.ok()) {
files_in_cache.clear();
}
return files_in_cache;
}
static std::string Read(const absl::string_view filepath) {
std::string file_content;
TF_CHECK_OK(tsl::ReadFileToString(tsl::Env::Default(),
std::string(filepath), &file_content));
return file_content;
}
void Write(const absl::string_view filepath,
const absl::string_view content) {
TF_CHECK_OK(CreateDirIfNeeded(cache_dir_, tsl::Env::Default()));
TF_CHECK_OK(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(filepath), content));
}
stream_executor::StreamExecutor* executor_ = NewStreamExecutor();
std::unique_ptr<HloModule> module_ =
ParseAndReturnVerifiedModule(kHloText).value();
const HloInstruction* dot_ = hlo_query::GetFirstInstructionWithOpcode(
*module_->entry_computation(), HloOpcode::kDot);
std::string cache_dir_ = [] {
tsl::Env* default_env = tsl::Env::Default();
std::string cache_dir;
CHECK(default_env->LocalTempFilename(&cache_dir));
return cache_dir;
}();
DebugOptions::AutotuneCacheMode GetCacheMode() const { return cache_mode_; }
void SetCacheMode(DebugOptions::AutotuneCacheMode cache_mode) {
cache_mode_ = cache_mode;
}
AutotuneConfig GetConfig() const {
DebugOptions options;
options.set_xla_gpu_per_fusion_autotune_cache_dir(cache_dir_);
options.set_xla_gpu_experimental_autotune_cache_mode(GetCacheMode());
return AutotuneConfig(DeviceConfig{executor_}, options);
}
AutotuneCacheKey GetCacheKey() const {
return AutotunerUtil::GetKey(dot_, GetConfig());
}
std::string GetCacheFilename() const {
absl::StatusOr<std::string> key_hash =
GetBase64EncodedSha256Hash(GetCacheKey().ToString());
CHECK_OK(key_hash.status());
return absl::StrCat(key_hash.value(), ".textproto");
}
std::string GetCacheFilePath() const {
return tsl::io::JoinPath(cache_dir_, GetCacheFilename());
}
const AutotuneResult result1_ = [] {
AutotuneResult result;
result.set_scratch_bytes(1);
return result;
}();
const AutotuneResult result2_ = [] {
AutotuneResult result;
result.set_scratch_bytes(2);
return result;
}();
private:
DebugOptions::AutotuneCacheMode cache_mode_ =
DebugOptions::AUTOTUNE_CACHE_MODE_UPDATE;
};
TEST_F(FileBasedCacheTest, AutotuneCreatesTmpAndWritesResultToTheCacheDir) {
TF_ASSERT_OK_AND_ASSIGN(
AutotuneResult result,
AutotunerUtil::Autotune(dot_, GetConfig(), [&] { return result1_; }));
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1);
EXPECT_EQ(ToString(result), ToString(result1_));
ASSERT_THAT(GetFilesInDir(cache_dir_),
UnorderedElementsAre(GetCacheFilename(), "tmp"));
EXPECT_EQ(Read(GetCacheFilePath()), ToString(result1_));
}
TEST_F(FileBasedCacheTest, AutotuneReadsResultFromTheCacheDir) {
Write(GetCacheFilePath(), ToString(result1_));
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(dot_, GetConfig(), [&] {
cache_hit = false;
return result2_;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0);
EXPECT_EQ(ToString(result), ToString(result1_));
}
TEST_F(FileBasedCacheTest,
RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain) {
auto check_autotune_cache_hit = [](const HloInstruction* instr,
const AutotuneConfig& config,
const AutotuneResult& expected_result) {
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(instr, config, [&] {
cache_hit = false;
AutotuneResult new_result;
new_result.set_scratch_bytes(2);
return new_result;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(ToString(result), ToString(expected_result));
};
const std::string cache_file_path = GetCacheFilePath();
const AutotuneConfig config = GetConfig();
Write(cache_file_path, ToString(result1_));
check_autotune_cache_hit(dot_, config, result1_);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0);
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path, kPlaceholderContent);
check_autotune_cache_hit(dot_, config, result1_);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 2);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0);
EXPECT_EQ(Read(cache_file_path), kPlaceholderContent);
}
TEST_F(FileBasedCacheTest,
IsInCacheReturnsTrueIfTheResultIsInTheFileBasedCache) {
Write(GetCacheFilePath(), ToString(result1_));
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(GetCacheKey(), GetConfig()));
EXPECT_TRUE(is_in_cache);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0);
}
TEST_F(FileBasedCacheTest, IsInCacheReturnsFalseIfTheResultIsNotInEitherCache) {
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(GetCacheKey(), GetConfig()));
EXPECT_FALSE(is_in_cache);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0);
EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1);
}
TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) {
TF_ASSERT_OK_AND_ASSIGN(
bool added,
AutotunerUtil::AddResult(GetCacheKey(), result1_, GetConfig()));
EXPECT_TRUE(added);
ASSERT_THAT(GetFilesInDir(cache_dir_),
UnorderedElementsAre(GetCacheFilename(), "tmp"));
EXPECT_EQ(Read(GetCacheFilePath()), ToString(result1_));
}
TEST_F(FileBasedCacheTest, RepeatedAddResultDoesNotWriteTheFileAgain) {
const std::string cache_file_path = GetCacheFilePath();
const AutotuneCacheKey cache_key = GetCacheKey();
const AutotuneConfig config = GetConfig();
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key, result1_, config));
EXPECT_TRUE(added);
}
ASSERT_THAT(GetFilesInDir(cache_dir_),
UnorderedElementsAre(GetCacheFilename(), "tmp"));
EXPECT_EQ(Read(cache_file_path), ToString(result1_));
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path, kPlaceholderContent);
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key, result1_, config));
EXPECT_FALSE(added);
}
EXPECT_EQ(Read(cache_file_path), kPlaceholderContent);
}
TEST(AutotuneCacheKeyTest, DeviceDescriptionToCacheKey) {
auto device_description =
[](absl::string_view spec_file_name) -> se::DeviceDescription {
se::GpuTargetConfigProto proto;
std::string spec_string;
CHECK_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo_opt",
"gpu_specs", spec_file_name),
&spec_string));
EXPECT_TRUE(
tsl::protobuf::TextFormat::ParseFromString(spec_string, &proto));
return se::DeviceDescription(proto.gpu_device_info());
};
EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey(
device_description("a100_sxm_40.txtpb")),
"CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: "
"1555 GB/s, L2 cache: 40 MB");
EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey(
device_description("a100_sxm_80.txtpb")),
"CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: "
"2039 GB/s, L2 cache: 40 MB");
EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey(
device_description("mi200.txtpb")),
"ROCM: gfx90a, Cores: 110, GPU clock: 1.7 GHz, Memory bandwidth: "
"1638 GB/s, L2 cache: 8 MB");
}
TEST_F(FileBasedCacheTest, AddResultDoesNotWriteTheFileInReadMode) {
SetCacheMode(DebugOptions::AUTOTUNE_CACHE_MODE_READ);
TF_ASSERT_OK_AND_ASSIGN(
bool added,
AutotunerUtil::AddResult(GetCacheKey(), result1_, GetConfig()));
EXPECT_TRUE(added);
EXPECT_EQ(GetFilesInDir(cache_dir_).size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7e207fa-a826-4269-ba9a-fa266b94b2de | cpp | tensorflow/tensorflow | custom_kernel_fusion_autotuner | third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.cc | third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner_test.cc | #include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/shaped_buffer.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::StatusOr<std::unique_ptr<HloModule>> ExtractFusionModule(
HloInstruction* fusion_instruction, int64_t kernel_index) {
std::unique_ptr<HloModule> hlo_module =
ExtractInstructionIntoNewModule(*fusion_instruction);
HloInstruction* instruction =
hlo_module->entry_computation()->root_instruction();
GpuBackendConfig gpu_config =
instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(instruction->set_backend_config(gpu_config));
return hlo_module;
}
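// Compiles the fusion once per candidate kernel index and measures its run
// time, returning (kernel index, duration) pairs.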
absl::StatusOr<std::vector<std::tuple<int, absl::Duration>>> ProfileKernels(
std::vector<CustomKernel>& kernels, HloInstruction* fusion_instruction,
AutotunerCompileUtil& compile_util, const AutotuneConfig& autotune_config,
const DebugOptions& debug_options) {
se::StreamExecutor* stream_exec = autotune_config.GetExecutor();
std::vector<std::tuple<int, absl::Duration>> results;
for (int i = 0; i < kernels.size(); ++i) {
TF_ASSIGN_OR_RETURN(absl::StatusOr<std::unique_ptr<Executable>> executable,
compile_util.Compile([&](const DebugOptions& opt) {
return ExtractFusionModule(fusion_instruction, i);
}));
se::DeviceMemoryAllocator* allocator = autotune_config.GetAllocator();
std::unique_ptr<se::DeviceMemoryAllocator> owned_allocator;
if (allocator == nullptr) {
owned_allocator =
std::make_unique<se::StreamExecutorMemoryAllocator>(stream_exec);
allocator = owned_allocator.get();
}
TF_ASSIGN_OR_RETURN(se::Stream* const stream, autotune_config.GetStream());
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*fusion_instruction, autotune_config, debug_options,
RedzoneBuffers::kAllInputs));
std::optional<ScopedShapedBuffer> reference_buffer;
std::optional<AutotunerCompileUtil::ProfilingOutput> profiling_output;
TF_ASSIGN_OR_RETURN(profiling_output, compile_util.ProfileExecutable(
executable->get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
results.push_back({i, profiling_output->duration});
}
return results;
}
absl::StatusOr<int> FindFastestKernel(
const std::vector<std::tuple<int, absl::Duration>>& results) {
auto iter = absl::c_min_element(
results, [](const std::tuple<int, absl::Duration>& lhs,
const std::tuple<int, absl::Duration>& rhs) {
return std::get<1>(lhs) < std::get<1>(rhs);
});
if (iter == results.end()) {
return absl::InternalError("Failed to find fastest kernel.");
}
return std::get<0>(*iter);
}
absl::Status UpdateFusionInstructionKernelIndex(
HloInstruction* fusion_instruction, int kernel_index) {
GpuBackendConfig gpu_config =
fusion_instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(gpu_config));
return absl::OkStatus();
}
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const HloInstruction* fusion_instruction,
const AutotuneConfig& autotune_config) {
auto config = fusion_instruction->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.custom_fusion_config();
auto* registry = CustomKernelFusionRegistry::Default();
auto* custom_kernel_fusion = registry->Lookup(config.name());
if (custom_kernel_fusion == nullptr) {
return absl::InternalError(
absl::StrCat("Custom kernel fusion ", config.name(),
" not found in a default registry."));
}
se::StreamExecutor* stream_exec = autotune_config.GetExecutor();
if (!stream_exec->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
se::DeviceDescription device_description =
stream_exec->GetDeviceDescription();
TF_ASSIGN_OR_RETURN(
std::vector<CustomKernel> kernels,
custom_kernel_fusion->LoadKernels(
device_description,
fusion_instruction->fused_instructions_computation()));
return kernels;
}
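// Loads all candidate kernels for the custom fusion, profiles them, and
// rewrites the fusion's backend config to point at the fastest kernel.
// Returns true if the selected kernel index changed.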
absl::StatusOr<bool> AutotuneCustomKernelFusion(
HloInstruction* fusion_instruction, const AutotuneConfig& autotune_config,
AutotunerCompileUtil& compile_util, const DebugOptions& debug_options) {
int previous_kernel_index =
fusion_instruction->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.custom_fusion_config()
.kernel_index();
TF_ASSIGN_OR_RETURN(std::vector<CustomKernel> kernels,
LoadKernels(fusion_instruction, autotune_config));
std::vector<std::tuple<int, absl::Duration>> results;
TF_ASSIGN_OR_RETURN(results,
ProfileKernels(kernels, fusion_instruction, compile_util,
autotune_config, debug_options));
TF_ASSIGN_OR_RETURN(int fastest_kernel_index, FindFastestKernel(results));
TF_RETURN_IF_ERROR(UpdateFusionInstructionKernelIndex(fusion_instruction,
fastest_kernel_index));
return previous_kernel_index != fastest_kernel_index;
}
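// Returns true if the computation is a kCustom fusion whose backend config carries the custom fusion kind.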
bool IsCustomFusion(const HloComputation* computation) {
if (!computation->IsFusionComputation()) {
return false;
}
HloInstruction* instruction = computation->FusionInstruction();
absl::StatusOr<GpuBackendConfig> gpu_backend_config =
instruction->backend_config<GpuBackendConfig>();
if (!gpu_backend_config.ok()) {
return false;
}
if (instruction->fusion_kind() != HloInstruction::FusionKind::kCustom) {
return false;
}
if (!gpu_backend_config->has_fusion_backend_config()) {
return false;
}
return gpu_backend_config->fusion_backend_config().kind() ==
kCustomFusionKind;
}
}
absl::StatusOr<bool> CustomKernelFusionAutotuner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (config_.IsDeviceless()) {
return false;
}
const DebugOptions& debug_options = module->config().debug_options();
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(compile_util.has_value());
bool hlo_changed = false;
for (const HloComputation* computation : module->computations()) {
if (IsCustomFusion(computation)) {
TF_ASSIGN_OR_RETURN(
bool instruction_changed,
AutotuneCustomKernelFusion(computation->FusionInstruction(), config_,
compile_util.value(), debug_options));
if (instruction_changed) {
hlo_changed = true;
}
}
}
return hlo_changed;
}
}
} | #include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class CustomKernelFusionAutotunerTest : public HloTestBase {
public:
CustomKernelFusionAutotunerTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
void SetUp() override { HloTestBase::SetUp(); }
void TearDown() override { HloTestBase::TearDown(); }
};
TEST_F(CustomKernelFusionAutotunerTest, DontRunOnNonCustomFusions) {
const std::string hlo_string = R"(
HloModule test_module, entry_computation_layout={(f32[20000,20000]{1,0}, f32[20000,20000]{1,0})->(f32[20000,20000]{1,0}, f32[20000,20000]{1,0})}
%fused_computation (p0.param_0: f32[20000,20000], p1.param_1: f32[20000,20000]) -> (f32[20000,20000], f32[20000,20000]) {
%p0.param_0 = f32[20000,20000]{1,0} parameter(0)
%p1.param_1 = f32[20000,20000]{1,0} parameter(1)
%add = f32[20000,20000]{1,0} add(f32[20000,20000]{1,0} %p0.param_0, f32[20000,20000]{1,0} %p1.param_1)
%mul = f32[20000,20000]{1,0} multiply(f32[20000,20000]{1,0} %p0.param_0, f32[20000,20000]{1,0} %p1.param_1)
ROOT %tuple = (f32[20000,20000]{1,0}, f32[20000,20000]{1,0}) tuple(f32[20000,20000]{1,0} %add, f32[20000,20000]{1,0} %mul)
}
ENTRY %BroadcastIntoAdd (p0: f32[20000,20000], p1: f32[20000,20000]) -> (f32[20000,20000], f32[20000,20000]) {
%p0 = f32[20000,20000]{1,0} parameter(0)
%p1 = f32[20000,20000]{1,0} parameter(1)
ROOT %fusion = (f32[20000,20000]{1,0}, f32[20000,20000]{1,0}) fusion(f32[20000,20000]{1,0} %p0, f32[20000,20000]{1,0} %p1), kind=kLoop, calls=%fused_computation
}
)";
std::unique_ptr<HloModule> hlo_module =
ParseAndReturnVerifiedModule(hlo_string).value();
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
ASSERT_TRUE(pipeline.Run(hlo_module.get()).ok());
}
TEST_F(CustomKernelFusionAutotunerTest,
CustomKernelFusionAutotunerPassSucceeds) {
const std::string hlo_string = R"(
HloModule extracted
cutlass_gemm {
p0 = f32[15,19]{1,0} parameter(0)
p1 = f32[19,17]{1,0} parameter(1)
ROOT r = f32[15, 17]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY region_198.14436 {
p.0 = f32[15,19]{1,0} parameter(0)
p.1 = f32[19,17]{1,0} parameter(1)
ROOT cutlass_gemm = f32[15,17]{1,0} fusion(p.0, p.1), kind=kCustom, calls=cutlass_gemm, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm","kernel_index":0}},"force_earliest_schedule":false}
}
)";
std::unique_ptr<HloModule> hlo_module =
ParseAndReturnVerifiedModule(hlo_string).value();
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
ASSERT_TRUE(pipeline.Run(hlo_module.get()).ok());
}
TEST_F(CustomKernelFusionAutotunerTest,
CustomKernelFusionAutotunerPassUpdatesUpdatesKernelIndex) {
const std::string hlo_string = R"(
HloModule extracted
cutlass_gemm {
p0 = f32[15,19]{1,0} parameter(0)
p1 = f32[19,17]{1,0} parameter(1)
ROOT r = f32[15, 17]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
ENTRY region_198.14436 {
p.0 = f32[15,19]{1,0} parameter(0)
p.1 = f32[19,17]{1,0} parameter(1)
ROOT cutlass_gemm = f32[15,17]{1,0} fusion(p.0, p.1), kind=kCustom,
calls=cutlass_gemm,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm","kernel_index":-1}},"force_earliest_schedule":false}
}
)";
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
std::string expected = R"(
CHECK: "kernel_index":0
)";
RunAndFilecheckHloRewrite(hlo_string, std::move(pipeline), expected);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eacdd4d3-8136-4b25-b2d6-03ae16e8f6e5 | cpp | tensorflow/tensorflow | conv_algorithm_picker | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker.cc | third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker_test.cc | #include "xla/service/gpu/autotuning/conv_algorithm_picker.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/autotuning/gpu_autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#if CUDNN_VERSION >= 90000
#include "third_party/gpus/cudnn/cudnn_ops.h"
#else
#include "third_party/gpus/cudnn/cudnn_ops_infer.h"
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
namespace {
using se::DeviceMemoryBase;
using se::dnn::AlgorithmDesc;
using std::optional;
Shape MaybeTupleElementShape(Shape shape, int64_t tuple_idx) {
if (shape.IsTuple()) {
return shape.tuple_shapes(tuple_idx);
} else {
return shape;
}
}
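// Scratch memory allocator used during convolution autotuning; the limit defaults to 4 GiB and can be overridden via TF_CUDNN_WORKSPACE_LIMIT_IN_MB.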
class ScratchAllocator : public se::ScratchAllocator {
public:
ScratchAllocator(int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
int64_t GetMemoryLimitInBytes() override {
return ScratchAllocator::GetDefaultMemoryLimitInBytes();
}
int64_t TotalAllocatedBytes() { return total_allocated_bytes_; }
static int64_t GetDefaultMemoryLimitInBytes() {
int64_t value;
    TF_CHECK_OK(tsl::ReadInt64FromEnvVar("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
                                         /*default_val=*/1LL << 12, &value));
return value * (1LL << 20);
}
absl::StatusOr<se::DeviceMemory<uint8_t>> AllocateBytes(
int64_t byte_size) override;
template <typename T>
absl::StatusOr<se::DeviceMemory<T>> Allocate(int64_t num_elements) {
TF_ASSIGN_OR_RETURN(se::DeviceMemory<uint8_t> bytes,
AllocateBytes(num_elements * sizeof(T)));
return se::DeviceMemory<T>(bytes);
}
private:
const int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
std::vector<se::OwningDeviceMemory> allocated_buffers_;
int64_t total_allocated_bytes_ = 0;
};
absl::StatusOr<se::DeviceMemory<uint8_t>> ScratchAllocator::AllocateBytes(
int64_t byte_size) {
  CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
  TF_ASSIGN_OR_RETURN(se::OwningDeviceMemory allocated_buffer,
                      memory_allocator_->Allocate(device_ordinal_, byte_size,
                                                  /*retry_on_failure=*/false));
total_allocated_bytes_ += byte_size;
se::DeviceMemoryBase buffer_addr = *allocated_buffer;
allocated_buffers_.push_back(std::move(allocated_buffer));
return se::DeviceMemory<uint8_t>(buffer_addr);
}
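// Collects candidate convolution runners for the given convolution config, covering plain, graph and fused convolution kinds.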
absl::StatusOr<std::vector<GenericConvRunner>> GetAlgorithms(
const GpuConvConfig& config, se::Stream* stream, bool use_cudnn_frontend,
bool use_fallback, const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config.output_type));
se::StreamExecutor* stream_exec = stream->parent();
std::vector<GenericConvRunner> result;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
switch (kind) {
default:
return Internal("Unknown ConvolutionKind %d", kind);
case se::dnn::ConvolutionKind::FORWARD_BIAS_ACTIVATION: {
if (!config.fusion) {
return Internal(
"GpuConvConfig had fusion ConvolutionKind but no FusionConfig.");
}
std::vector<std::unique_ptr<const se::dnn::FusedConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetFusedConvolveRunners(
use_cudnn_frontend,
se::dnn::ConvolutionKind::FORWARD, input_type,
BiasTypeForInputType(input_type), output_type,
config.conv_result_scale,
config.fusion->side_input_scale,
config.fusion->leakyrelu_alpha, stream,
config.input_descriptor, config.filter_descriptor,
config.bias_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, config.fusion->mode, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::FusedConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD_GRAPH: {
std::vector<std::unique_ptr<const se::dnn::GraphConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetGraphConvolveRunners(
kind, input_type, output_type, stream, config.input_descriptor,
config.filter_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, numeric_options, &runners, config.serialized_graph));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::GraphConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD:
case se::dnn::ConvolutionKind::BACKWARD_DATA:
case se::dnn::ConvolutionKind::BACKWARD_FILTER: {
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
use_cudnn_frontend, kind, input_type, output_type, stream,
config.input_descriptor,
          /*input_data=*/DeviceMemoryBase(nullptr),
          config.filter_descriptor,
          /*filter_data=*/DeviceMemoryBase(nullptr),
          config.output_descriptor,
          /*output_data=*/DeviceMemoryBase(nullptr), config.conv_desc,
          use_fallback, /*scratch_allocator=*/nullptr, numeric_options,
          &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
}
return result;
}
absl::StatusOr<std::vector<std::unique_ptr<const se::dnn::ConvRunner>>>
GetMIOpenAlgorithms(const HloCustomCallInstruction* instr,
absl::Span<se::DeviceMemoryBase> operand_buffers,
absl::Span<se::DeviceMemoryBase> result_buffers,
se::StreamExecutor* stream_exec,
ScratchAllocator* scratch_allocator, se::Stream* stream,
const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType dtype,
GetDNNDataTypeFromPrimitiveType(config.output_type));
TF_ASSIGN_OR_RETURN(
GpuConvParams params,
GetGpuConvParams(config, operand_buffers, result_buffers));
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
      /*use_cudnn_frontend=*/false, kind, dtype, dtype, stream,
params.config->input_descriptor, params.input_buf,
params.config->filter_descriptor, params.filter_buf,
params.config->output_descriptor, params.output_buf,
params.config->conv_desc,
      /*use_fallback=*/false, scratch_allocator, numeric_options,
&runners));
return runners;
}
std::string NumBytesToString(int64_t bytes) {
return absl::StrCat(tsl::strings::HumanReadableNumBytes(bytes), " (", bytes,
"B)");
}
CudnnVersion GetCudnnVersion(se::StreamExecutor* stream_executor) {
se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(stream_executor);
CudnnVersion cudnn_version;
cudnn_version.set_major(version.major_version());
cudnn_version.set_minor(version.minor_version());
cudnn_version.set_patch(version.patch());
return cudnn_version;
}
ComputeCapability GetComputeCapability(se::StreamExecutor* stream_executor) {
ComputeCapability cc;
se::CudaComputeCapability se_cc =
stream_executor->GetDeviceDescription().cuda_compute_capability();
cc.set_major(se_cc.major);
cc.set_minor(se_cc.minor);
return cc;
}
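// Logs device, platform, driver, runtime and cudnn version information for debugging.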
void PrintPlatformInfo(const se::Stream* stream) {
auto* se = stream->parent();
const auto& desc = se->GetDeviceDescription();
LOG(ERROR) << "Device: " << desc.name();
LOG(ERROR) << "Platform: " << desc.platform_version();
LOG(ERROR) << "Driver: " << desc.driver_version();
LOG(ERROR) << "Runtime: " << desc.runtime_version();
auto dnn_version = GetDnnVersionInfo(se);
if (dnn_version.ok()) {
auto v = dnn_version.value();
LOG(ERROR) << "cudnn version: " << v.major_version() << "."
<< v.minor_version() << "." << v.patch();
}
}
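// Checks the redzones around the autotuning buffers; on an out-of-bounds write it records a REDZONE_MODIFIED failure in *result and returns false.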
absl::StatusOr<bool> CheckRedzones(const se::RedzoneAllocator& allocator,
se::Stream* stream, absl::string_view name,
std::string_view instr_str,
AutotuneResult* result) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("CudnnConvAlgorithmPicker checking redzones",
2);
using RedzoneCheckStatus = se::RedzoneAllocator::RedzoneCheckStatus;
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus redzone_check,
allocator.CheckRedzones());
if (redzone_check.ok()) {
return true;
}
auto* fail = result->mutable_failure();
fail->set_kind(AutotuneResult::REDZONE_MODIFIED);
*fail->mutable_msg() = redzone_check.RedzoneFailureMsg();
fail->set_buffer_address(
reinterpret_cast<uint64_t>(redzone_check.user_buffer_address));
LOG(ERROR) << absl::StreamFormat(
"Detected cudnn out-of-bounds write in conv %s buffer! This is likely a "
"cudnn bug. We will skip this algorithm in the future, but your GPU "
"state may already be corrupted, leading to incorrect results. Within "
"Google, no action is needed on your part. Outside of Google, please "
"ensure you're running the latest version of cudnn. If that doesn't fix "
"the problem, please file a bug with this full error message and we'll "
"contact nvidia.",
name);
LOG(ERROR) << redzone_check.RedzoneFailureMsg();
LOG(ERROR) << "HloInstruction " << instr_str;
PrintPlatformInfo(stream);
return false;
}
}  // namespace
bool ShouldInitConvData(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 2;
}
bool ShouldCheckConv(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 4;
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithm(
const HloCustomCallInstruction* instr) {
return AutotunerUtil::Autotune(
instr, config_, [&] { return PickBestAlgorithmNoCache(instr); });
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr) {
if (config_.IsDeviceless()) {
AutotuneResult result;
result.mutable_algorithm()->set_algo_id(-1);
return result;
}
se::StreamExecutor* stream_exec = config_.GetExecutor();
absl::MutexLock lock(&GetGpuMutex(stream_exec));
if (!stream_exec->SynchronizeAllActivity()) {
return Internal(
"Failed to synchronize GPU for autotuning conv instruction");
}
absl::StatusOr<AutotuneResult> result_or(Internal("Unknown platform."));
se::Platform::Id platform_id = stream_exec->GetPlatform()->id();
if (platform_id == se::rocm::kROCmPlatformId) {
result_or = PickBestAlgorithmNoCacheRocm(instr);
} else if (platform_id == se::cuda::kCudaPlatformId) {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
result_or = PickBestAlgorithmNoCacheCuda(instr);
#endif
}
return result_or;
}
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
absl::StatusOr<GpuConvAlgorithmPicker::AutotuneRuntimeArguments>
GpuConvAlgorithmPicker::AutotuneRuntimeArguments::FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options) {
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*instr, config, debug_options,
RedzoneBuffers::kAllInputsOutputsNoScratch));
std::string canonical_hlo(
AutotuneCacheKey(config.GetExecutor()->GetDeviceDescription(), *instr)
.GetHlo());
TF_ASSIGN_OR_RETURN(GpuConvConfig gpu_conv_config, GetGpuConvConfig(instr));
GpuConvAlgorithmPicker::AutotuneRuntimeArguments runtime_arguments = {
instr->GetModule()->config(),
std::move(rz_buffers),
std::move(gpu_conv_config),
{canonical_hlo}};
return runtime_arguments;
}
struct CudnnVersionRange {
using TupleVersion = std::tuple<int, int, int>;
TupleVersion begin;
TupleVersion end;
bool IsInRange(const CudnnVersion& other) const {
TupleVersion other_version{other.major(), other.minor(), other.patch()};
return begin <= other_version && other_version < end;
}
CudnnVersionRange(const CudnnVersion& begin, const CudnnVersion& end)
: begin(begin.major(), begin.minor(), begin.patch()),
end(end.major(), end.minor(), end.patch()) {}
CudnnVersionRange(const TupleVersion& begin, const TupleVersion& end)
: begin(begin), end(end) {}
};
struct ComputeCapabilityRange {
using TupleComputeCapability = std::tuple<int, int>;
TupleComputeCapability begin;
TupleComputeCapability end;
bool IsInRange(const ComputeCapability& other) const {
TupleComputeCapability other_cc{other.major(), other.minor()};
return begin <= other_cc && other_cc < end;
}
};
struct DisabledAlgorithm {
CudnnVersionRange cudnn_version_range;
ComputeCapabilityRange compute_capability_range;
int algo_id;
};
static const DisabledAlgorithm kDisabledAlgorithms[] = {
    {/*cudnn_version_range=*/{/*begin=*/{9, 0, 0}, /*end=*/{10, 0, 0}},
     /*compute_capability_range=*/{/*begin=*/{6, 0}, /*end=*/{8, 0}},
     /*algo_id=*/14}};
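// Runs and times a single convolution runner, comparing its output against the reference result (if any) and reporting failures such as denylisted algorithms, redzone violations or mismatched results.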
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::AutotuneOneConvRunner(
GenericConvRunner* const runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments) {
auto alg = runner->ToAlgorithmDesc();
se::StreamExecutor* stream_exec = config_.GetExecutor();
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
auto make_failure = [&alg](AutotuneResult::FailureKind kind,
absl::string_view msg) {
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.mutable_failure()->set_kind(kind);
    result.mutable_failure()->set_msg(msg.data(), msg.size());
return result;
};
AlgorithmDesc alg_key(alg.algo_id(), alg.tensor_ops_enabled(), std::nullopt);
std::string instr_str = instruction_info.has_value()
? std::string(instruction_info->GetHlo())
: "<unknown>";
for (const auto& disabled_algo : kDisabledAlgorithms) {
if (disabled_algo.cudnn_version_range.IsInRange(
GetCudnnVersion(stream_exec)) &&
disabled_algo.compute_capability_range.IsInRange(
GetComputeCapability(stream_exec)) &&
disabled_algo.algo_id == alg.algo_id()) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
}
if (absl::c_linear_search(disabled_algos, alg_key)) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
GpuConvConfig config = runtime_arguments.gpu_conv_config;
auto activation_mode =
config.fusion ? config.fusion->mode : se::dnn::ActivationMode::kNone;
if (!alg.is_cudnn_frontend() &&
config.kind == CudnnConvKind::kForwardActivation &&
activation_mode == se::dnn::ActivationMode::kNone &&
alg.algo_id() != CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for implicit RELU.");
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator scratch_allocator,
AutotunerUtil::CreateRedzoneAllocator(
config_, runtime_arguments.hlo_module_config.debug_options()));
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for " << instr_str;
SlowOperationAlarm alarm(absl::Seconds(1), [&] {
return absl::StrFormat(
"Trying algorithm %s for conv %s is taking a while...", alg.ToString(),
instr_str);
});
std::optional<size_t> workspace_size =
runner->ToAlgorithmDesc().workspace_size();
if (!workspace_size) {
return make_failure(AutotuneResult::UNKNOWN,
"Internal error: missing workspace size from "
"OpRunner::ToAlgorithmDesc()");
}
auto scratch_or = scratch_allocator.AllocateBytes(*workspace_size);
if (!scratch_or.ok()) {
return make_failure(AutotuneResult::DISQUALIFIED,
absl::StrCat("Scratch allocation failed: ",
scratch_or.status().ToString()));
}
se::DeviceMemoryBase scratch_memory = scratch_or.value();
RunConvOptions options;
options.runner_cache = runner;
float max_time = 0;
float min_time = std::numeric_limits<float>::max();
absl::Status launch_status;
std::vector<se::DeviceMemoryBase> operand_buffers =
runtime_arguments.rz_buffers.input_buffers();
std::vector<se::DeviceMemoryBase> result_buffers =
runtime_arguments.rz_buffers.output_buffers();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
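  // Warm-up run; the timed runs in the loop below reuse the same buffers.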
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
options.profile_result = &profile_result;
profile_result.set_warmup_run_executed(true);
constexpr int kMaxIter = 10;
int num_iters = 0;
for (; num_iters < kMaxIter && launch_status.ok(); ++num_iters) {
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
if (!profile_result.is_valid()) {
break;
}
float old_min_time = min_time;
min_time = std::min(min_time, profile_result.elapsed_time_in_ms());
max_time = std::max(max_time, profile_result.elapsed_time_in_ms());
constexpr float kThreshold = 0.05f;
if (std::abs(profile_result.elapsed_time_in_ms() - old_min_time) /
old_min_time <
kThreshold) {
break;
}
}
if (!launch_status.ok()) {
VLOG(5) << "Launch failed: " << launch_status;
return make_failure(
AutotuneResult::DISQUALIFIED,
absl::StrCat("Profiling failure on cuDNN engine ", alg.ToString(), ": ",
launch_status.ToString()));
}
if (!profile_result.is_valid()) {
VLOG(5) << "Launch succeeded but profile result is invalid.";
return make_failure(
AutotuneResult::UNKNOWN,
absl::StrCat("Launch succeeded but profile result is invalid, "
"with cuDNN engine ",
alg.ToString(), ": ", launch_status.ToString()));
}
VLOG(4) << "Best time: " << min_time << " ms. Worst time: " << max_time
<< " ms. Total iterations: " << num_iters;
int64_t scratch_bytes_used =
scratch_allocator.TotalAllocatedBytesExcludingRedzones();
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.set_scratch_bytes(scratch_bytes_used);
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Milliseconds(min_time));
if (!ShouldCheckConv(runtime_arguments.hlo_module_config)) {
if (!reference_result->has_value()) {
(*reference_result) = {
alg, std::vector<DeviceMemoryBase>(result_buffers.size())};
}
return result;
}
TF_ASSIGN_OR_RETURN(
bool input_output_allocator_redzone_clear,
CheckRedzones(runtime_arguments.rz_buffers.RedzoneAllocator(), stream,
"input/output", instr_str, &result));
TF_ASSIGN_OR_RETURN(
bool scratch_allocator_redzone_clear,
CheckRedzones(scratch_allocator, stream, "scratch", instr_str, &result));
if (!input_output_allocator_redzone_clear ||
!scratch_allocator_redzone_clear) {
if (runtime_arguments.canonical_hlo.has_value()) {
std::string canonical_hlo = runtime_arguments.canonical_hlo.value();
std::string blas_version;
if (auto* blas = stream_exec->AsBlas()) {
(void)blas->GetVersion(&blas_version);
}
AlgorithmDenylist proto;
auto entry = proto.add_entries();
entry->set_hlo(canonical_hlo);
*entry->mutable_cc() = GetComputeCapability(stream_exec);
*entry->mutable_cudnn_version() = GetCudnnVersion(stream_exec);
entry->set_blas_version(blas_version);
auto algo = entry->add_algos();
algo->set_id(alg.algo_id());
algo->set_tensor_ops(alg.tensor_ops_enabled());
LOG(ERROR) << "To denylist this algorithm for this convolution, "
"copy-paste the following "
"proto to the denylist file pointed by XLA_FLAGS "
"--xla_gpu_algorithm_denylist_path="
<< GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path()
<< " : " << proto.ShortDebugString();
}
return result;
}
if (reference_result->has_value()) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("BufferComparator::CompareEqual", 2);
const DebugOptions& debug_options =
runtime_arguments.hlo_module_config.debug_options();
for (int i = 0; i < result_buffers.size(); ++i) {
Shape output_shape = MaybeTupleElementShape(
runtime_arguments.rz_buffers.output_shape(), i);
XLA_SCOPED_LOGGING_TIMER_LEVEL("BufferComparator::CompareEqual", 2);
BufferComparator comparator(output_shape,
debug_options.xla_gpu_autotune_gemm_rtol());
absl::StatusOr<bool> compare_result = comparator.CompareEqual(
stream, (*reference_result)->buffers[i], result_buffers[i]);
if (!compare_result.ok()) {
LOG(ERROR) << "Unable to compare "
<< (*reference_result)->algorithm.ToString() << " against "
<< alg.ToString() << " for " << instr_str << ": "
<< compare_result.status();
if (compare_result.status().code() ==
absl::StatusCode::kResourceExhausted) {
return compare_result.status();
}
CHECK(!debug_options.xla_gpu_crash_on_verification_failures());
} else if (!compare_result.value()) {
LOG(ERROR)
<< "Results mismatch between different convolution algorithms. "
"This is likely a bug/unexpected loss of precision in cudnn.\n"
<< instr_str << " for " << (*reference_result)->algorithm.ToString()
<< " vs " << alg.ToString();
PrintPlatformInfo(stream);
if (instruction_info.has_value()) {
VLOG(2) << "Full module on failure: \n"
<< instruction_info->GetModelStr();
}
auto* fail = result.mutable_failure();
fail->set_kind(AutotuneResult::WRONG_RESULT);
fail->set_buffer_address(
reinterpret_cast<uint64_t>(result_buffers[i].opaque()));
*fail->mutable_reference_algorithm() =
(*reference_result)->algorithm.ToProto();
}
}
} else {
XLA_SCOPED_LOGGING_TIMER_LEVEL("Memcpy Reference Result", 2);
std::vector<DeviceMemoryBase> reference_result_buffers(
result_buffers.size());
for (int i = 0; i < result_buffers.size(); ++i) {
TF_ASSIGN_OR_RETURN(
reference_result_buffers[i],
runtime_arguments.rz_buffers.RedzoneAllocator().AllocateBytes(
result_buffers[i].size()));
TF_RETURN_IF_ERROR(stream->Memcpy(&reference_result_buffers[i],
result_buffers[i],
result_buffers[i].size()));
}
(*reference_result) = {alg, reference_result_buffers};
}
return result;
}
absl::StatusOr<AutotuneResult>
GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
const HloCustomCallInstruction* instr) {
AutotuneCacheKey instruction_info{config_.GetModelStr(), *instr};
std::string instr_str(instruction_info.GetHlo());
XLA_SCOPED_LOGGING_TIMER(absl::StrCat(
"GpuConvAlgorithmPicker::PickBestAlgorithmImpl for ", instr_str));
const DebugOptions& debug_options =
instr->GetModule()->config().debug_options();
const bool crash_on_checking_failure =
debug_options.xla_gpu_crash_on_verification_failures();
std::string blas_version;
se::StreamExecutor* stream_exec = config_.GetExecutor();
if (auto* blas = stream_exec->AsBlas()) {
(void)blas->GetVersion(&blas_version);
}
std::vector<AlgorithmDesc> disabled_algos;
TF_ASSIGN_OR_RETURN(
AutotuneRuntimeArguments runtime_arguments,
AutotuneRuntimeArguments::FromInstruction(instr, config_, debug_options));
if (runtime_arguments.canonical_hlo.has_value()) {
disabled_algos = GetDisabledConvAlgorithms(
GetComputeCapability(stream_exec), GetCudnnVersion(stream_exec),
blas_version, runtime_arguments.canonical_hlo.value());
}
const bool cudnn_frontend_enabled =
debug_options.xla_gpu_enable_cudnn_frontend();
bool allow_tf32 = true;
if (instr) {
allow_tf32 = absl::c_all_of(
instr->precision_config().operand_precision(),
[](int precision) { return precision <= PrecisionConfig::HIGH; });
}
const se::NumericOptions numeric_options{
RequireDeterminism(instr->GetModule()->config()), allow_tf32};
std::optional<ReferenceResult> reference_result;
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
TF_ASSIGN_OR_RETURN(
std::vector<GenericConvRunner> runners,
GetAlgorithms(runtime_arguments.gpu_conv_config, stream,
                    cudnn_frontend_enabled,
                    /*use_fallback=*/false, numeric_options));
std::vector<AutotuneResult> profile_results;
for (auto& runner_cache : runners) {
TF_ASSIGN_OR_RETURN(
auto result,
AutotuneOneConvRunner(&runner_cache, &reference_result, disabled_algos,
instruction_info, runtime_arguments));
profile_results.emplace_back(std::move(result));
}
if (!reference_result) {
LOG(WARNING) << "None of the algorithms provided by cuDNN heuristics "
"worked; trying fallback algorithms.";
if (runtime_arguments.canonical_hlo.has_value()) {
LOG(WARNING) << "Conv: " << runtime_arguments.canonical_hlo.value();
}
TF_ASSIGN_OR_RETURN(
std::vector<GenericConvRunner> fallback_runners,
GetAlgorithms(runtime_arguments.gpu_conv_config, stream,
                      cudnn_frontend_enabled,
                      /*use_fallback=*/true, numeric_options));
for (auto& runner_cache : fallback_runners) {
TF_ASSIGN_OR_RETURN(
auto result, AutotuneOneConvRunner(&runner_cache, &reference_result,
disabled_algos, instruction_info,
runtime_arguments));
profile_results.emplace_back(std::move(result));
}
}
if (instr) {
AutotuningLog log;
{
ConvInstructionLog instr_log;
*instr_log.mutable_instruction() = instr->ToProto();
for (int i = 0; i < instr->operand_count(); i++) {
*instr_log.add_operand_shapes() = instr->operand(i)->shape().ToProto();
instr_log.add_operand_addresses(reinterpret_cast<uint64_t>(
runtime_arguments.rz_buffers.input_buffers()[i].opaque()));
}
for (se::DeviceMemoryBase result_buffer :
runtime_arguments.rz_buffers.output_buffers()) {
instr_log.add_result_addresses(
reinterpret_cast<uint64_t>(result_buffer.opaque()));
}
log.mutable_instr()->PackFrom(instr_log);
}
for (const auto& profile : profile_results) {
*log.add_results() = profile;
}
*log.mutable_compute_capability() = GetComputeCapability(stream_exec);
*log.mutable_cudnn_version() = GetCudnnVersion(stream_exec);
log.set_device_pci_bus_id(stream_exec->GetDeviceDescription().pci_bus_id());
log.set_blas_version(blas_version);
VLOG(2) << "Autotuning result: " << log.ShortDebugString();
if (crash_on_checking_failure) {
for (const auto& profile : profile_results) {
if (profile.has_failure() &&
profile.failure().kind() != AutotuneResult::DISQUALIFIED) {
LOG(FATAL) << "crash_on_checking_failure encountered errors:\n\n"
<< log.DebugString();
}
}
}
}
TF_ASSIGN_OR_RETURN(AutotuneResult selected_algorithm,
PickBestResult(profile_results, instr_str,
runtime_arguments.hlo_module_config));
return selected_algorithm;
}
#endif
absl::StatusOr<AutotuneResult>
GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheRocm(
const HloCustomCallInstruction* instr) {
XLA_SCOPED_LOGGING_TIMER(absl::StrCat(
"GpuConvAlgorithmPicker::PickBestAlgorithmImpl for ", instr->ToString()));
const bool allow_tf32 = absl::c_all_of(
instr->precision_config().operand_precision(),
[](int precision) { return precision <= PrecisionConfig::HIGH; });
const se::NumericOptions numeric_options{
RequireDeterminism(instr->GetModule()->config()), allow_tf32};
se::StreamExecutor* stream_exec = config_.GetExecutor();
const auto device_ordinal = stream_exec->device_ordinal();
std::vector<se::DeviceMemoryBase> operand_buffers;
se::DeviceMemoryAllocator* allocator = config_.GetAllocator();
ScratchAllocator input_output_allocator(device_ordinal, allocator);
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
const auto initialize_buffer = [stream](DeviceMemoryBase buffer) {
return stream->MemZero(&buffer, buffer.size());
};
for (const auto* operand : instr->operands()) {
TF_ASSIGN_OR_RETURN(auto buffer,
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(operand->shape())));
TF_RETURN_IF_ERROR(initialize_buffer(buffer));
operand_buffers.push_back(buffer);
}
std::vector<se::DeviceMemoryBase> result_buffers(
instr->shape().tuple_shapes_size());
if (instr->shape().IsTuple()) {
for (int i = 0; i < instr->shape().tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(
result_buffers[i],
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(i))));
TF_RETURN_IF_ERROR(initialize_buffer(result_buffers[i]));
}
} else {
TF_ASSIGN_OR_RETURN(
result_buffers[0],
input_output_allocator.AllocateBytes(
ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(0))));
TF_RETURN_IF_ERROR(initialize_buffer(result_buffers[0]));
}
ScratchAllocator scratch_allocator(device_ordinal, allocator);
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners,
GetMIOpenAlgorithms(instr, absl::MakeSpan(operand_buffers),
absl::MakeSpan(result_buffers), stream_exec,
&scratch_allocator, stream, numeric_options));
std::vector<AutotuneResult> profile_results;
if (runners.size() == 1) {
TF_ASSIGN_OR_RETURN(auto alg, runners[0]->ToAlgorithmDesc());
auto algorithm_proto = alg.ToProto();
profile_results.emplace_back();
auto& result = profile_results.back();
*result.mutable_algorithm() = algorithm_proto;
result.set_scratch_bytes(runners[0]->GetWorkspaceSize());
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Milliseconds(-1));
} else {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(auto alg, runner->ToAlgorithmDesc());
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for "
<< instr->ToString();
TF_ASSIGN_OR_RETURN(
DeviceMemoryBase scratch_memory,
scratch_allocator.AllocateBytes(runner->GetWorkspaceSize()));
TF_ASSIGN_OR_RETURN(auto lazy_runner,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
GenericConvRunner runner_cache(std::move(lazy_runner));
RunConvOptions options;
options.profile_result = &profile_result;
options.runner_cache = &runner_cache;
absl::Status launch_status =
RunGpuConv(config, absl::MakeSpan(operand_buffers), result_buffers,
scratch_memory, stream, options);
if (!launch_status.ok()) {
continue;
}
if (!profile_result.is_valid()) {
continue;
}
profile_results.emplace_back();
AutotuneResult& result = profile_results.back();
*result.mutable_algorithm() = alg.ToProto();
int64_t scratch_bytes_used = scratch_allocator.TotalAllocatedBytes();
result.set_scratch_bytes(scratch_bytes_used);
*result.mutable_run_time() = tsl::proto_utils::ToDurationProto(
absl::Milliseconds(profile_result.elapsed_time_in_ms()));
}
}
TF_ASSIGN_OR_RETURN(AutotuneResult selected_algorithm,
PickBestResult(profile_results, instr->ToString(),
instr->GetModule()->config()));
return selected_algorithm;
}
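// Rewrites the conv custom-call so its backend config carries the chosen algorithm and its result tuple reserves the required scratch space.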
absl::StatusOr<bool> GpuConvAlgorithmPicker::RunOnInstruction(
HloInstruction* instr) {
CHECK(IsCustomCallToDnnConvolution(*instr));
const bool strict = instr->parent()
->parent()
->config()
.debug_options()
.xla_gpu_strict_conv_algorithm_picker();
absl::StatusOr<AutotuneResult> best_algo_or =
PickBestAlgorithm(Cast<HloCustomCallInstruction>(instr));
if (!best_algo_or.ok()) {
auto msg = absl::StrFormat(
"Failed to determine best cudnn convolution algorithm for:\n%s\n\n"
"Original error: %s",
instr->ToString(), best_algo_or.status().ToString());
if (strict) {
return Unknown(
"%s\n\nTo ignore this failure and try to use a fallback algorithm "
"(which may have suboptimal performance), use "
"XLA_FLAGS=--xla_gpu_strict_conv_algorithm_picker=false. Please "
"also file a bug for the root cause of failing autotuning.",
msg);
}
LOG(WARNING)
<< msg << "\n\nAs a result, convolution performance may be suboptimal.";
return false;
}
auto best_algo = std::move(best_algo_or).value();
VLOG(3) << "Setting cudnn conv to use algorithm "
<< best_algo.conv().algorithm() << " and "
<< NumBytesToString(best_algo.scratch_bytes())
<< " of scratch memory: " << instr->ToString()
<< " tensor_ops_enabled: " << best_algo.conv().tensor_ops_enabled();
HloComputation* computation = instr->parent();
std::vector<Shape> new_call_element_shapes;
new_call_element_shapes.reserve(instr->shape().tuple_shapes_size() - 1);
for (int i = 0; i < instr->shape().tuple_shapes_size() - 1; ++i) {
new_call_element_shapes.emplace_back(instr->shape().tuple_shapes(i));
}
new_call_element_shapes.emplace_back(
ShapeUtil::MakeShape(U8, {best_algo.scratch_bytes()}));
Shape new_call_shape = ShapeUtil::MakeTupleShape(new_call_element_shapes);
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config,
instr->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& backend_config =
*gpu_backend_config.mutable_cudnn_conv_backend_config();
*backend_config.mutable_algorithm() = best_algo.algorithm();
backend_config.mutable_algorithm()->mutable_workspace_size()->set_value(
best_algo.scratch_bytes());
HloInstruction* new_call = computation->AddInstruction(
instr->CloneWithNewOperands(new_call_shape, instr->operands()));
new_call->SetAndSanitizeName(instr->name());
VLOG(3) << "Replacing convolution " << instr->ToString() << " with "
<< new_call->ToString();
TF_RETURN_IF_ERROR(new_call->set_backend_config(gpu_backend_config));
std::vector<HloInstruction*> new_tuple_elements;
new_tuple_elements.reserve(new_call->shape().tuple_shapes_size() - 1);
for (int i = 0; i < new_call->shape().tuple_shapes_size() - 1; ++i) {
new_tuple_elements.emplace_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_call->shape().tuple_shapes(i), new_call, i)));
}
new_tuple_elements.emplace_back(computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<uint8_t>({}))));
HloInstruction* new_tuple = computation->AddInstruction(
HloInstruction::CreateTuple(new_tuple_elements));
TF_RETURN_IF_ERROR(instr->parent()->ReplaceInstruction(instr, new_tuple));
return true;
}
absl::StatusOr<bool> GpuConvAlgorithmPicker::RunOnComputation(
HloComputation* computation) {
std::vector<HloInstruction*> convs;
for (HloInstruction* instr : computation->instructions()) {
if (IsCandidate(instr)) {
convs.push_back(instr);
}
}
bool changed = false;
for (HloInstruction* instr : convs) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(instr));
changed |= result;
}
return changed;
}
absl::StatusOr<bool> GpuConvAlgorithmPicker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER(
absl::StrCat("GpuConvAlgorithmPicker for ", module->name()));
if (!IsEnabled(module)) {
VLOG(3) << "Convolution auto-tuning disabled, GpuConvAlgorithmPicker "
"returning early.";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/autotuning/conv_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GpuConvAlgorithmPickerTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
GpuConvAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
};
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[3,56,56,16]{2,1,0,3} parameter(0)
%arg1 = f32[3,3,3,64]{2,1,0,3} parameter(1)
ROOT %conv = f32[54,54,16,64]{1,0,3,2} convolution(%arg0, %arg1), window={size=3x3}, dim_labels=f01b_i01o->01bf
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
changed = false;
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_scratch_bytes = result.scratch_bytes();
int64_t new_scratch_bytes = old_scratch_bytes + 1;
result.set_scratch_bytes(new_scratch_bytes);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK(RunHloPass(TupleSimplifier(), m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&conv))));
EXPECT_THAT(
conv->shape(),
GmockMatch(m::Shape().WithSubshape(
{1}, m::Shape().WithElementType(U8).WithDims({new_scratch_bytes}))));
TF_ASSERT_OK_AND_ASSIGN(auto dnn_version, GetDnnVersionInfo(stream_exec));
if (dnn_version.major_version() >= 9 && dnn_version.major_version() < 10 &&
std::holds_alternative<stream_executor::CudaComputeCapability>(cc) &&
std::get<stream_executor::CudaComputeCapability>(cc).major == 7 &&
std::get<stream_executor::CudaComputeCapability>(cc).minor == 0) {
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->has_cudnn_conv_backend_config() &&
conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.algorithm()
.algo_id() != 14);
}
}
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithmGraphConvF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP() << "FP8 convolutions require Hopper or newer architecture.";
}
constexpr absl::string_view kHlo = R"(
HloModule module
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY main {
input = f8e4m3fn[1,6,6,128] parameter(0)
filter = f8e4m3fn[16,3,3,128] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,6,6,128] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[16,3,3,128] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,6,6,128] convert(input)
input_unscaled = f32[1,6,6,128] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[16,3,3,128] convert(filter)
filter_unscaled = f32[16,3,3,128] multiply(filter_f32, filter_scale_bcast)
conv_a = f32[1,6,6,16] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_o01i->b01f, feature_group_count=1
z_scale = f32[] parameter(4)
z_scale_bcast = f32[1,6,6,16] broadcast(z_scale), dimensions={}
conv_a_scaled = f32[1,6,6,16] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,6,6,16] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,6,6,16] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,6,6,16] clamp(c1_bcast, conv_a_scaled, c2_bcast)
conv_a_clamped_f8 = f8e4m3fn[1,6,6,16] convert(conv_a_clamped)
abs_conv_a = f32[1,6,6,16] abs(conv_a)
c0 = f32[] constant(-inf)
amax = f32[] reduce(abs_conv_a, c0), dimensions={0,1,2,3}, to_apply=apply
ROOT conv_f8 = (f8e4m3fn[1,6,6,16], f32[]) tuple(conv_a_clamped_f8, amax)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = GetCudaComputeCapability();
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(ConvRewriter(cc), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(CudnnFusedConvRewriter(
GetCudaComputeCapability(), GetDnnVersion(),
stream_exec->GetDeviceDescription().runtime_version()),
m.get()));
ASSERT_TRUE(changed);
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/conv_algorithm_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9df55e8-0ddf-4590-8744-6b2d90500b08 | cpp | tensorflow/tensorflow | gpu_backend_lib | third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib_test.cc | #include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <functional>
#include <ios>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <string_view>
#include <system_error>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/PassRegistry.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/StandardInstrumentations.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/Internalize.h"
#include "llvm/Transforms/Scalar.h"
#include "xla/service/gpu/llvm_gpu_backend/utils.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/llvm_ir/llvm_command_line_options.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/cuda_root_path.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/random.h"
#include "tsl/platform/rocm_rocdl_path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#include "tsl/profiler/lib/traceme.h"
#if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#endif
#if TENSORFLOW_USE_SYCL
#include "LLVMSPIRVLib.h"
#include "LLVMSPIRVOpts.h"
#endif
namespace xla {
namespace gpu {
namespace {
static llvm::codegen::RegisterCodeGenFlags CGF;
const int kAMDGPUInlineThreshold = 0x100000;
const int kDefaultInlineThreshold = 1100;
std::string MakeNameForTempProduct(absl::string_view input_filename,
absl::string_view extension) {
return ReplaceFilenameExtension(tsl::io::Basename(input_filename), extension);
}
void InitializePasses(llvm::PassRegistry* pass_registry) {
llvm::initializeCore(*pass_registry);
llvm::initializeCodeGen(*pass_registry);
llvm::initializeScalarOpts(*pass_registry);
llvm::initializeVectorization(*pass_registry);
llvm::initializeIPO(*pass_registry);
llvm::initializeAnalysis(*pass_registry);
llvm::initializeTransformUtils(*pass_registry);
llvm::initializeInstCombine(*pass_registry);
llvm::initializeTarget(*pass_registry);
llvm::initializeCodeGenPrepareLegacyPassPass(*pass_registry);
}
std::unique_ptr<llvm::TargetMachine> GetTargetMachine(
llvm::Triple triple, absl::string_view cpu_name,
const DebugOptions& debug_options, absl::string_view feature_str) {
std::string error;
const llvm::Target* target =
llvm::TargetRegistry::lookupTarget("", triple, error);
if (target == nullptr) {
LOG(FATAL) << "Unable to find Target for triple '" << triple.str() << "'"
<< " -- " << error;
return nullptr;
}
llvm::TargetOptions target_options =
llvm::codegen::InitTargetOptionsFromCodeGenFlags(llvm::Triple());
target_options.MCOptions.AsmVerbose = false;
llvm::CodeGenOptLevel codegen_opt_level;
switch (debug_options.xla_backend_optimization_level()) {
case 1:
codegen_opt_level = llvm::CodeGenOptLevel::Less;
break;
case 2:
codegen_opt_level = llvm::CodeGenOptLevel::Default;
break;
case 3:
codegen_opt_level = llvm::CodeGenOptLevel::Aggressive;
break;
default:
codegen_opt_level = llvm::CodeGenOptLevel::None;
}
return absl::WrapUnique(target->createTargetMachine(
triple.str(), llvm_ir::AsStringRef(cpu_name),
llvm_ir::AsStringRef(feature_str), target_options,
llvm::codegen::getExplicitRelocModel(),
llvm::codegen::getExplicitCodeModel(), codegen_opt_level));
}
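// Emits PTX assembly for the module using the given target machine.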
std::string EmitModuleToPTX(llvm::Module* module,
llvm::TargetMachine* target_machine) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaEmitGpuAsm:#module=%s#",
module->getName().str());
});
std::string ptx;
llvm::raw_string_ostream stream(ptx);
llvm::buffer_ostream pstream(stream);
llvm::legacy::PassManager pm;
pm.add(new llvm::TargetLibraryInfoWrapperPass(
llvm::Triple(module->getTargetTriple())));
target_machine->addPassesToEmitFile(pm, pstream, nullptr,
llvm::CodeGenFileType::AssemblyFile);
pm.run(*module);
return ptx;
}
void FeedLLVMWithFlags(const std::vector<std::string>& cl_opts) {
std::vector<const char*> fake_argv = {""};
for (const std::string& cl_opt : cl_opts) {
fake_argv.push_back(cl_opt.c_str());
}
llvm::cl::ParseCommandLineOptions(fake_argv.size(), fake_argv.data());
}
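// Returns true if the module references libdevice/ROCm device-library functions (__nv_*, __ocml_*, __ockl_*) and may therefore need device bitcode linked in.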
bool CouldNeedDeviceBitcode(const llvm::Module& module) {
for (const llvm::Function& function : module.functions()) {
if (!function.isIntrinsic() && function.isDeclaration() &&
(function.getName().starts_with("__nv_") ||
function.getName().starts_with("__ocml_") ||
function.getName().starts_with("__ockl_"))) {
return true;
}
}
return false;
}
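// Links each bitcode file into the module, internalizing symbols the module does not need.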
absl::Status LinkWithBitcodeVector(
llvm::Module* module, const std::vector<std::string>& bitcode_path_vector) {
llvm::Linker linker(*module);
for (auto& bitcode_path : bitcode_path_vector) {
if (!tsl::Env::Default()->FileExists(bitcode_path).ok()) {
LOG(ERROR) << "bitcode module is required by this HLO module but was "
"not found at "
<< bitcode_path;
return xla::Internal("bitcode module not found at %s", bitcode_path);
}
std::unique_ptr<llvm::Module> bitcode_module =
LoadIRModule(bitcode_path, &module->getContext());
bitcode_module->setDataLayout(module->getDataLayout());
if (linker.linkInModule(
std::move(bitcode_module), llvm::Linker::Flags::LinkOnlyNeeded,
[](llvm::Module& M, const llvm::StringSet<>& GVS) {
internalizeModule(M, [&GVS](const llvm::GlobalValue& GV) {
return !GV.hasName() || (GVS.count(GV.getName()) == 0);
});
})) {
return xla::Internal("Error linking bitcode module from %s",
bitcode_path);
}
}
return absl::OkStatus();
}
absl::Status NVPTXTargetModuleLinker(llvm::Module* module,
se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options,
const std::string& device_bitcode_path) {
TF_RETURN_IF_ERROR(
nvptx::LinkLibdeviceIfNecessary(module, device_bitcode_path));
module->addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
debug_options.xla_gpu_ftz());
if (debug_options.xla_gpu_ftz()) {
for (llvm::Function& fn : *module) {
fn.addFnAttr("denormal-fp-math-f32", "preserve-sign");
}
}
return absl::OkStatus();
}
std::unique_ptr<llvm::TargetMachine> NVPTXGetTargetMachine(
llvm::Triple target_triple, se::CudaComputeCapability compute_capability,
const DebugOptions& debug_options) {
#ifdef GOOGLE_CUDA
absl::StatusOr<stream_executor::SemanticVersion> runtime_cuda_version =
stream_executor::GetAsmCompilerVersion(
debug_options.xla_gpu_cuda_data_dir());
constexpr stream_executor::SemanticVersion kCompileTimeCudaVersion{
CUDA_VERSION / 1000, (CUDA_VERSION / 10) % 100, CUDA_VERSION % 10};
auto highest_supported_cuda_version = [&] {
if (runtime_cuda_version.ok()) {
return std::min(runtime_cuda_version.value(), kCompileTimeCudaVersion);
}
return kCompileTimeCudaVersion;
}();
auto ptx_version = nvptx::DetermineHighestSupportedPtxVersionFromCudaVersion(
highest_supported_cuda_version);
int highest_supported_ptx_version =
ptx_version.major() * 10 + ptx_version.minor();
VLOG(1) << "Targeting PTX version: " << highest_supported_ptx_version;
std::string feature_str =
absl::StrFormat("+ptx%d", highest_supported_ptx_version);
#else
std::string feature_str;
#endif
return GetTargetMachine(target_triple, nvptx::GetSmName(compute_capability),
debug_options, feature_str);
}
using TargetModuleLinker =
std::function<absl::Status(llvm::Module*, se::GpuComputeCapability,
const DebugOptions&, const std::string&)>;
void DumpModule(const std::string output_filename, const llvm::Module* module) {
std::error_code ec;
auto out = std::make_unique<llvm::raw_fd_ostream>(
llvm::StringRef(output_filename), ec, llvm::sys::fs::OF_None);
if (ec) {
LOG(FATAL) << "Unable to open " << output_filename
<< " to dump LLVM IR: " << ec.message();
return;
}
module->print(*out, nullptr);
out->close();
}
const llvm::Module* GetModule(llvm::Any IR) {
if (const auto** M = llvm::any_cast<const llvm::Module*>(&IR)) return *M;
if (const auto** F = llvm::any_cast<const llvm::Function*>(&IR)) {
return (*F)->getParent();
}
if (const auto** C = llvm::any_cast<const llvm::LazyCallGraph::SCC*>(&IR)) {
return (*C)->begin()->getFunction().getParent();
}
if (const auto** L = llvm::any_cast<const llvm::Loop*>(&IR)) {
const llvm::Function* F = (*L)->getHeader()->getParent();
return F->getParent();
}
return nullptr;
}
auto DumpCallbackForModule(std::string module_identifier,
std::string outputs_dir) {
int i = 0;
return [=](llvm::StringRef pass, llvm::Any ir) mutable {
const llvm::Module* module = GetModule(ir);
if (!module) {
return;
}
const std::string basename = ReplaceFilenameExtension(
absl::string_view(tsl::io::Basename(module_identifier)),
absl::StrFormat("pass-%02d.before.%s.ll", i++,
absl::string_view(pass.str())));
DumpModule(tsl::io::JoinPath(outputs_dir, basename), module);
};
}
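// Links device bitcode via `module_linker`, then runs the new-pass-manager
// optimization pipeline over `module` at the level requested by
// xla_backend_optimization_level, optionally dumping IR before each pass.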
absl::Status LinkAndOptimizeModule(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options, const std::string& device_bitcode_path,
TargetModuleLinker module_linker, llvm::Triple default_target_triple,
llvm::TargetMachine* target_machine, int inline_threshold) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaOptimizeLlvmIr:#module=%s#",
module->getName().str());
});
TF_RETURN_IF_ERROR(
module_linker(module, gpu_version, debug_options, device_bitcode_path));
llvm::LoopAnalysisManager lam;
llvm::FunctionAnalysisManager fam;
llvm::CGSCCAnalysisManager cgam;
llvm::ModuleAnalysisManager mam;
if (target_machine) {
fam.registerPass([&] { return target_machine->getTargetIRAnalysis(); });
}
llvm::PipelineTuningOptions pto;
pto.SLPVectorization = true;
pto.InlinerThreshold = inline_threshold;
llvm::PassInstrumentationCallbacks pic;
llvm::StandardInstrumentations si(module->getContext(), false);
si.registerCallbacks(pic, &mam);
llvm::PassBuilder pb(target_machine, pto, std::nullopt, &pic);
pb.registerModuleAnalyses(mam);
pb.registerCGSCCAnalyses(cgam);
pb.registerFunctionAnalyses(fam);
pb.registerLoopAnalyses(lam);
pb.crossRegisterProxies(lam, fam, cgam, mam);
if (debug_options.xla_gpu_dump_llvmir()) {
std::string outputs_dir;
if (!tsl::io::GetTestUndeclaredOutputsDir(&outputs_dir)) {
outputs_dir = debug_options.xla_dump_to();
}
if (!outputs_dir.empty()) {
pic.registerBeforeNonSkippedPassCallback(
DumpCallbackForModule(module->getModuleIdentifier(), outputs_dir));
} else {
LOG(ERROR) << "--xla_gpu_dump_llvmir is set, but neither the environment "
<< "variable TEST_UNDECLARED_OUTPUTS_DIR nor the flag "
<< "--xla_dump_to is set, so the llvm dumps are disabled.";
}
}
llvm::OptimizationLevel ol;
switch (debug_options.xla_backend_optimization_level()) {
case 0:
ol = llvm::OptimizationLevel::O0;
break;
case 1:
ol = llvm::OptimizationLevel::O1;
break;
case 2:
ol = llvm::OptimizationLevel::O2;
break;
case 3:
ol = llvm::OptimizationLevel::O3;
break;
}
llvm::ModulePassManager mpm;
mpm.addPass(llvm::VerifierPass());
if (ol == llvm::OptimizationLevel::O0) {
mpm.addPass(pb.buildO0DefaultPipeline(ol));
} else {
mpm.addPass(pb.buildPerModuleDefaultPipeline(ol));
}
mpm.addPass(llvm::VerifierPass());
mpm.run(*module, mam);
return absl::OkStatus();
}
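// One-time process-wide initialization of the NVPTX backend: LLVM flags,
// target registration, and pass initialization.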
void NVPTXBackendInit(const DebugOptions& debug_options) {
FeedLLVMWithFlags({"-bonus-inst-threshold=2"});
FeedLLVMWithFlags({"-nvptx-prec-divf32=1"});
FeedLLVMWithFlags({
"-slp-vectorize-hor=false",
"-slp-max-reg-size=32",
});
llvm_ir::InitializeLLVMCommandLineOptions(
debug_options.xla_backend_extra_options());
LLVMInitializeNVPTXTarget();
LLVMInitializeNVPTXTargetInfo();
LLVMInitializeNVPTXTargetMC();
LLVMInitializeNVPTXAsmPrinter();
llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry();
InitializePasses(registry);
}
}
namespace nvptx {
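// Maps a CUDA compute capability to the closest sm_XX target known here,
// rounding down for unknown capabilities and using "sm_90a" for Hopper.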
std::string GetSmName(se::CudaComputeCapability compute_capability) {
int compute_capability_version =
compute_capability.major * 10 + compute_capability.minor;
int sm_version = 30;
int supported_versions[] = {90, 89, 87, 86, 80, 75, 72, 70, 62,
61, 60, 53, 52, 50, 37, 35, 32, 30};
for (int v : supported_versions) {
if (v <= compute_capability_version) {
sm_version = v;
break;
}
}
if (sm_version != compute_capability_version &&
compute_capability_version < supported_versions[0]) {
LOG(WARNING) << "Unknown compute capability "
<< compute_capability.ToString()
<< ". Defaulting to telling LLVM that we're compiling for sm_"
<< sm_version;
}
std::string_view extension =
(compute_capability.major == 9 && sm_version == 90) ? "a" : "";
return absl::StrCat("sm_", sm_version, extension);
}
std::string CantFindCudaMessage(absl::string_view msg,
absl::string_view xla_gpu_cuda_data_dir) {
return absl::StrCat(
msg, "\nSearched for CUDA in the following directories:\n ",
absl::StrJoin(tsl::CandidateCudaRoots(std::string{xla_gpu_cuda_data_dir}),
"\n "),
"\nYou can choose the search directory by setting xla_gpu_cuda_data_dir "
"in HloModule's DebugOptions. For most apps, setting the environment "
"variable XLA_FLAGS=--xla_gpu_cuda_data_dir=/path/to/cuda will work.");
}
static std::string GetLibdeviceDir(absl::string_view xla_gpu_cuda_data_dir) {
for (const std::string& cuda_root :
tsl::CandidateCudaRoots(std::string{xla_gpu_cuda_data_dir})) {
std::string libdevice_dir =
tsl::io::JoinPath(cuda_root, "nvvm", "libdevice");
VLOG(2) << "Looking for libdevice at " << libdevice_dir;
if (tsl::Env::Default()->IsDirectory(libdevice_dir).ok()) {
VLOG(2) << "Found libdevice dir " << libdevice_dir;
return libdevice_dir;
}
}
LOG(WARNING) << CantFindCudaMessage(
"Can't find libdevice directory ${CUDA_DIR}/nvvm/libdevice. This may "
"result in compilation or runtime failures, if the program we try to run "
"uses routines from libdevice.",
xla_gpu_cuda_data_dir);
return ".";
}
std::string LibDevicePath(absl::string_view xla_gpu_cuda_data_dir) {
static absl::Mutex libdevice_cache_mu(absl::kConstInit);
static auto& libdevice_dir_path_cache ABSL_GUARDED_BY(libdevice_cache_mu) =
*new absl::flat_hash_map<std::string, std::string>();
std::string libdevice_dir_path = [&] {
absl::MutexLock l(&libdevice_cache_mu);
auto it = libdevice_dir_path_cache.find(xla_gpu_cuda_data_dir);
if (it != libdevice_dir_path_cache.end()) {
return it->second;
}
auto [it2, inserted] = libdevice_dir_path_cache.emplace(
xla_gpu_cuda_data_dir, GetLibdeviceDir(xla_gpu_cuda_data_dir));
return it2->second;
}();
return tsl::io::JoinPath(libdevice_dir_path, "libdevice.10.bc");
}
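// Links libdevice into `module` if CouldNeedDeviceBitcode says the module
// references device-library routines; no-op otherwise.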
absl::Status LinkLibdeviceIfNecessary(llvm::Module* module,
const std::string& libdevice_path) {
if (!CouldNeedDeviceBitcode(*module)) {
return absl::OkStatus();
}
if (!tsl::Env::Default()->FileExists(libdevice_path).ok()) {
LOG(WARNING)
<< "libdevice is required by this HLO module but was not found at "
<< libdevice_path;
return xla::Internal("libdevice not found at %s", libdevice_path);
}
VLOG(1) << "Linking with libdevice from: " << libdevice_path;
return LinkWithBitcodeVector(module, {libdevice_path});
}
absl::StatusOr<std::string> CompileToPtx(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options,
std::function<void(llvm::TargetMachine*)> configure_target) {
static absl::once_flag backend_init_flag;
absl::call_once(backend_init_flag, NVPTXBackendInit, debug_options);
std::string ptx;
std::unique_ptr<llvm::TargetMachine> target_machine;
{
tsl::profiler::TraceMe activity(
[&] { return absl::StrCat("Compiling IR:", module->getName().str()); },
tsl::profiler::TraceMeLevel::kInfo);
XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str());
if (module->empty() && module->global_empty()) {
VLOG(2) << "Module '" << module->getName().str()
<< "' is empty. Skipping compilation.";
return std::string();
}
auto compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
if (!compute_capability) {
return xla::Internal("Incompatible compute capability was specified.");
}
llvm::Triple default_target_triple("nvptx64-unknown-unknown");
std::unique_ptr<llvm::TargetMachine> target_machine = NVPTXGetTargetMachine(
default_target_triple, *compute_capability, debug_options);
if (configure_target) {
configure_target(target_machine.get());
}
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
TF_RETURN_IF_ERROR(LinkAndOptimizeModule(
module, gpu_version, debug_options,
LibDevicePath(debug_options.xla_gpu_cuda_data_dir()),
NVPTXTargetModuleLinker, default_target_triple, target_machine.get(),
kDefaultInlineThreshold));
uint64_t end_usecs = tsl::Env::Default()->NowMicros();
RecordLlvmPassesDuration(end_usecs - start_usecs);
start_usecs = tsl::Env::Default()->NowMicros();
ptx = EmitModuleToPTX(module, target_machine.get());
end_usecs = tsl::Env::Default()->NowMicros();
RecordLlvmToPtxDuration(end_usecs - start_usecs);
}
return ptx;
}
namespace {
constexpr stream_executor::SemanticVersion kFallbackPtxVersion{6, 5, 0};
constexpr stream_executor::SemanticVersion kMaxPtxVersion{8, 5, 0};
}
stream_executor::SemanticVersion
DetermineHighestSupportedPtxVersionFromCudaVersion(
stream_executor::SemanticVersion cuda_version) {
if (cuda_version < stream_executor::SemanticVersion{11, 0, 0}) {
return kFallbackPtxVersion;
}
if (cuda_version < stream_executor::SemanticVersion{12, 6, 0}) {
return {cuda_version.major() - 4, cuda_version.minor(), 0};
}
return kMaxPtxVersion;
}
}
namespace {
std::vector<std::string> GetROCDLPaths(std::string gcn_arch_name,
const std::string& rocdl_dir_path) {
static std::vector<std::string>* rocdl_filenames =
new std::vector<std::string>(
{"opencl.bc", "ocml.bc", "ockl.bc", "oclc_finite_only_off.bc",
"oclc_daz_opt_off.bc", "oclc_correctly_rounded_sqrt_on.bc",
"oclc_unsafe_math_off.bc", "oclc_wavefrontsize64_on.bc",
"oclc_abi_version_500.bc"});
std::vector<std::string> result;
result.reserve(rocdl_filenames->size() + 1);
for (auto& filename : *rocdl_filenames) {
result.push_back(tsl::io::JoinPath(rocdl_dir_path, filename));
}
std::vector<std::string> tokens = absl::StrSplit(gcn_arch_name, ':');
std::string amdgpu_version = gcn_arch_name;
if (!tokens.empty() && tokens[0].size() >= 3) {
amdgpu_version = tokens[0].substr(3);
}
result.push_back(tsl::io::JoinPath(
rocdl_dir_path,
absl::StrCat("oclc_isa_version_", amdgpu_version, ".bc")));
return result;
}
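// Process-wide cache of compiled HSACO binaries, keyed by the IR text and gfx
// architecture (with a hash for quick rejection), to avoid re-running ld.lld.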
struct HsacoCacheEntry {
uint64_t hash;
std::string ir;
std::string gfx;
std::vector<uint8_t> hsaco;
};
struct HsacoCache {
protected:
std::vector<HsacoCacheEntry> cache;
std::mutex m_mutex;
int request_count = 0;
int hit_count = 0;
public:
static bool Find(const std::string& ir, uint64_t& hash,
const std::string& gfx, std::vector<uint8_t>& hsaco);
static void Add(const std::string& ir, uint64_t hash, const std::string& gfx,
const std::vector<uint8_t>& hsaco);
};
static HsacoCache g_hsacoCache;
bool HsacoCache::Find(const std::string& ir, uint64_t& hash,
const std::string& gfx, std::vector<uint8_t>& hsaco) {
std::lock_guard<std::mutex> lg(g_hsacoCache.m_mutex);
hash = std::hash<std::string>{}(ir);
bool hit = false;
for (auto& x : g_hsacoCache.cache) {
if (x.hash != hash) continue;
if (x.gfx != gfx) continue;
if (x.ir != ir) continue;
hsaco = x.hsaco;
hit = true;
break;
}
g_hsacoCache.request_count++;
if (hit) g_hsacoCache.hit_count++;
if (!(g_hsacoCache.request_count % 50))
VLOG(1) << "HSACO cache: " << g_hsacoCache.request_count << " requests, "
<< g_hsacoCache.hit_count << " hits";
return hit;
}
void HsacoCache::Add(const std::string& ir, uint64_t hash,
const std::string& gfx,
const std::vector<uint8_t>& hsaco) {
std::lock_guard<std::mutex> lg(g_hsacoCache.m_mutex);
g_hsacoCache.cache.resize(g_hsacoCache.cache.size() + 1);
g_hsacoCache.cache.back().ir = ir;
g_hsacoCache.cache.back().hash = hash;
g_hsacoCache.cache.back().gfx = gfx;
g_hsacoCache.cache.back().hsaco = hsaco;
}
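// Emits `module` to an AMDGPU object file in a temp directory and links it
// with ld.lld into a HSACO binary, returning the binary's bytes.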
absl::StatusOr<std::vector<uint8_t>> EmitModuleToHsaco(
llvm::Module* module, llvm::TargetMachine* target_machine) {
auto* env = tsl::Env::Default();
std::vector<std::string> tempdir_vector;
env->GetLocalTempDirectories(&tempdir_vector);
if (tempdir_vector.empty()) {
return xla::Internal(
"Unable to locate a temporary directory for compile-time artifacts.");
}
std::string tempdir_name = tempdir_vector.front();
VLOG(1) << "Compile-time artifacts located at: " << tempdir_name;
bool keep_tempfiles = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_ROCM_KEEP_XLA_TEMPFILES",
false, &keep_tempfiles));
std::string random_number = std::to_string(tsl::random::New64());
std::string ir_filename =
absl::StrCat(module->getModuleIdentifier(), random_number + ".ll");
std::string ir_path = tsl::io::JoinPath(tempdir_name, ir_filename);
std::string ir_opt_filename =
absl::StrCat(module->getModuleIdentifier(), random_number + "_opt.ll");
std::string ir_opt_path = tsl::io::JoinPath(tempdir_name, ir_opt_filename);
std::string isabin_filename =
absl::StrCat(module->getModuleIdentifier(), random_number + ".o");
std::string isabin_path = tsl::io::JoinPath(tempdir_name, isabin_filename);
std::string hsaco_filename =
absl::StrCat(module->getModuleIdentifier(), random_number + ".hsaco");
std::string hsaco_path = tsl::io::JoinPath(tempdir_name, hsaco_filename);
std::error_code ec;
std::unique_ptr<llvm::raw_fd_ostream> ir_fs(
new llvm::raw_fd_ostream(ir_path, ec, llvm::sys::fs::OF_None));
module->print(*ir_fs, nullptr);
ir_fs->flush();
llvm::legacy::PassManager pm;
pm.add(new llvm::TargetLibraryInfoWrapperPass(
llvm::Triple(module->getTargetTriple())));
llvm::SmallVector<char, 0> stream;
llvm::raw_svector_ostream pstream(stream);
std::unique_ptr<llvm::raw_fd_ostream> isabin_fs(
new llvm::raw_fd_ostream(isabin_path, ec, llvm::sys::fs::OF_Text));
module->setDataLayout(target_machine->createDataLayout());
target_machine->addPassesToEmitFile(pm, *isabin_fs, nullptr,
llvm::CodeGenFileType::ObjectFile);
pm.run(*module);
isabin_fs->flush();
if (keep_tempfiles) {
std::unique_ptr<llvm::raw_fd_ostream> ir_fs(
new llvm::raw_fd_ostream(ir_opt_path, ec, llvm::sys::fs::OF_None));
module->print(*ir_fs, nullptr);
ir_fs->flush();
}
std::string lld_path;
if (std::getenv("LLVM_PATH")) {
lld_path = tsl::io::JoinPath(std::getenv("LLVM_PATH"), "bin");
} else {
lld_path = tsl::io::JoinPath(tsl::RocmRoot(), "llvm/bin");
}
auto lld_program = llvm::sys::findProgramByName("ld.lld", {lld_path});
if (!lld_program) {
return xla::Internal("unable to find ld.lld in PATH: %s",
lld_program.getError().message());
}
std::vector<llvm::StringRef> lld_args{
llvm_ir::AsStringRef("ld.lld"), llvm_ir::AsStringRef("-flavor"),
llvm_ir::AsStringRef("gnu"), llvm_ir::AsStringRef("-shared"),
llvm_ir::AsStringRef(isabin_path), llvm_ir::AsStringRef("-o"),
llvm_ir::AsStringRef(hsaco_path),
};
std::string error_message;
int lld_result =
llvm::sys::ExecuteAndWait(*lld_program, llvm_ir::AsArrayRef(lld_args),
std::nullopt, {}, 0, 0, &error_message);
if (lld_result) {
return xla::Internal("ld.lld execute fail: %s, error code %d",
error_message, lld_result);
}
std::ifstream hsaco_file(hsaco_path, std::ios::binary | std::ios::ate);
std::ifstream::pos_type hsaco_file_size = hsaco_file.tellg();
std::vector<uint8_t> hsaco(hsaco_file_size);
hsaco_file.seekg(0, std::ios::beg);
hsaco_file.read(reinterpret_cast<char*>(hsaco.data()), hsaco_file_size);
hsaco_file.close();
if (!keep_tempfiles) {
remove(ir_path.c_str());
remove(isabin_path.c_str());
remove(hsaco_path.c_str());
}
return hsaco;
}
absl::Status LinkROCDLIfNecessary(llvm::Module* module,
std::string gcn_arch_name,
const std::string& rocdl_dir_path) {
if (!CouldNeedDeviceBitcode(*module)) {
return absl::OkStatus();
}
return LinkWithBitcodeVector(module,
GetROCDLPaths(gcn_arch_name, rocdl_dir_path));
}
absl::Status AMDGPUTargetModuleLinker(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options,
const std::string& device_bitcode_dir_path) {
auto compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
if (!compute_capability) {
return xla::Internal("Incompatible compute capability was specified.");
}
std::string gcn_arch_name = compute_capability->gcn_arch_name();
TF_RETURN_IF_ERROR(
LinkROCDLIfNecessary(module, gcn_arch_name, device_bitcode_dir_path));
if (debug_options.xla_gpu_ftz()) {
for (llvm::Function& fn : *module) {
fn.addFnAttr("denormal-fp-math-f32", "preserve-sign");
}
}
return absl::OkStatus();
}
std::string MapGCNArchNameTokenToFeatureStr(const std::string& token,
const std::string& gfx) {
if (token == "sramecc+") {
return "+sramecc";
} else if (token == "sramecc-") {
if (gfx == "gfx90a" || gfx == "gfx940" || gfx == "gfx941" ||
gfx == "gfx942")
return "";
return "-sramecc";
} else if (token == "xnack+") {
return "+xnack";
} else if (token == "xnack-") {
return "-xnack";
}
return "";
}
std::pair<std::string, std::string> GetFeatureStrFromGCNArchName(
const std::string& gcn_arch_name) {
std::string feature_str;
std::string gfx = gcn_arch_name;
std::vector<std::string> tokens = absl::StrSplit(gcn_arch_name, ':');
std::vector<std::string> mapped_tokens;
if (!tokens.empty()) gfx = tokens[0];
for (auto it = tokens.begin(); it != tokens.end(); it++) {
if (it != tokens.begin()) {
std::string token(*it);
std::string mapped_token = MapGCNArchNameTokenToFeatureStr(token, gfx);
mapped_tokens.push_back(mapped_token);
}
}
feature_str = absl::StrJoin(mapped_tokens, ",");
return std::make_pair(gfx, feature_str);
}
std::unique_ptr<llvm::TargetMachine> AMDGPUGetTargetMachine(
llvm::Triple target_triple, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options) {
auto compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
std::string gcn_arch_name = compute_capability->gcn_arch_name();
auto arch = GetFeatureStrFromGCNArchName(gcn_arch_name);
return GetTargetMachine(std::move(target_triple), arch.first, debug_options,
arch.second);
}
std::string GetROCDLDir(const DebugOptions& debug_options) {
std::vector<std::string> potential_rocdl_dirs;
const std::string& datadir = debug_options.xla_gpu_cuda_data_dir();
if (!datadir.empty()) {
potential_rocdl_dirs.push_back(datadir);
}
potential_rocdl_dirs.push_back(tsl::RocdlRoot());
for (const std::string& potential_rocdl_dir : potential_rocdl_dirs) {
if (tsl::Env::Default()->IsDirectory(potential_rocdl_dir).ok()) {
VLOG(2) << "Found ROCm-Device-Libs dir " << potential_rocdl_dir;
return potential_rocdl_dir;
}
VLOG(2) << "Unable to find potential ROCm-Device-Libs dir "
<< potential_rocdl_dir;
}
return ".";
}
void AMDGPUBackendInit(const DebugOptions& debug_options,
std::string& rocdl_dir_path) {
llvm_ir::InitializeLLVMCommandLineOptions(
debug_options.xla_backend_extra_options());
#if TENSORFLOW_USE_ROCM
LLVMInitializeAMDGPUTarget();
LLVMInitializeAMDGPUTargetInfo();
LLVMInitializeAMDGPUTargetMC();
LLVMInitializeAMDGPUAsmParser();
LLVMInitializeAMDGPUAsmPrinter();
#endif
rocdl_dir_path = GetROCDLDir(debug_options);
llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry();
InitializePasses(registry);
}
}
namespace amdgpu {
std::string LibDevicePath(std::string gcn_arch_name,
const std::string& rocdl_dir_path) {
auto libdevice_dir_paths = GetROCDLPaths(gcn_arch_name, rocdl_dir_path);
for (auto libdevice_dir_path : libdevice_dir_paths) {
if (libdevice_dir_path.find("ocml.bc") != std::string::npos) {
return libdevice_dir_path;
}
}
return "";
}
absl::StatusOr<std::vector<uint8_t>> CompileToHsaco(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options,
const std::string& module_config_cache_key) {
static absl::once_flag backend_init_flag;
static std::string rocdl_dir_path;
absl::call_once(backend_init_flag, AMDGPUBackendInit, debug_options,
rocdl_dir_path);
std::vector<uint8_t> hsaco;
std::unique_ptr<llvm::TargetMachine> target_machine;
std::string str;
llvm::raw_string_ostream stream(str);
stream << *module;
if (str.size() >= 13 && str.substr(0, 13) == "; ModuleID = ") {
auto pos = str.find('\n');
if (pos != std::string::npos) str = str.substr(pos + 1);
}
if (str.size() >= 18 && str.substr(0, 18) == "source_filename = ") {
auto pos = str.find('\n');
if (pos != std::string::npos) str = str.substr(pos + 1);
}
str += module_config_cache_key;
{
tsl::profiler::TraceMe activity(
[&] { return absl::StrCat("Compiling IR", module->getName().str()); },
tsl::profiler::TraceMeLevel::kInfo);
XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str());
auto compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
if (!compute_capability) {
return xla::Internal("Incompatible compute capability was specified.");
}
std::string gcn_arch_name = compute_capability->gcn_arch_name();
uint64_t hash;
if (HsacoCache::Find(str, hash, gcn_arch_name, hsaco)) {
VLOG(1) << "HSACO cache hit";
return hsaco;
}
VLOG(1) << "HSACO cache miss";
bool dump_lls = false;
if (dump_lls) {
static int hsaco_count = 0;
std::string name = "/tmp/" + std::to_string(hsaco_count) + ".ll";
hsaco_count++;
std::ofstream ofs(name);
ofs << str;
ofs.close();
}
llvm::Triple default_target_triple("amdgcn--amdhsa-amdgiz");
std::unique_ptr<llvm::TargetMachine> target_machine =
AMDGPUGetTargetMachine(default_target_triple, gpu_version,
debug_options);
TF_RETURN_IF_ERROR(LinkAndOptimizeModule(
module, gpu_version, debug_options, rocdl_dir_path,
AMDGPUTargetModuleLinker, default_target_triple, target_machine.get(),
kAMDGPUInlineThreshold));
TF_ASSIGN_OR_RETURN(hsaco, EmitModuleToHsaco(module, target_machine.get()));
HsacoCache::Add(str, hash, gcn_arch_name, hsaco);
}
return hsaco;
}
}
namespace {
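// SPIR-V (SYCL) backend stubs: there is no TargetMachine to build and no
// extra bitcode to link, so these helpers mostly defer to the generic
// LinkAndOptimizeModule pipeline above.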
std::unique_ptr<llvm::TargetMachine> SPIRGetTargetMachine(
llvm::Triple target_triple, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options) {
return nullptr;
}
absl::Status SPIRTargetModuleLinker(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options,
const std::string& device_bitcode_dir_path) {
return absl::OkStatus();
}
absl::StatusOr<std::string> EmitModuleToSpir(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options) {
#if TENSORFLOW_USE_SYCL
SPIRV::TranslatorOpts::ExtensionsStatusMap ExtensionsStatus;
SPIRV::TranslatorOpts opts(SPIRV::VersionNumber::MaximumVersion,
ExtensionsStatus);
opts.enableAllExtensions();
std::ostringstream oss;
std::string err;
bool success = llvm::writeSpirv(module, opts, oss, err);
if (!success) {
return xla::Internal("Failed to convert LLVM IR to SPIR-V: %s", err);
}
return oss.str();
#else
return absl::UnimplementedError("Not implemented for SYCL");
#endif
}
void SPIRBackendInit(const DebugOptions& debug_options) {
FeedLLVMWithFlags({
"-slp-vectorize-hor=false",
"-slp-min-reg-size=64",
"-slp-max-reg-size=64",
});
llvm_ir::InitializeLLVMCommandLineOptions(
debug_options.xla_backend_extra_options());
llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry();
InitializePasses(registry);
}
}
namespace spir {
absl::StatusOr<std::vector<uint8_t>> CompileToSpir(
llvm::Module* module, se::GpuComputeCapability gpu_version,
const DebugOptions& debug_options) {
std::string libdevice_dir_path;
static absl::once_flag backend_init_flag;
absl::call_once(backend_init_flag, SPIRBackendInit, debug_options);
std::string spir;
{
XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str());
if (module->empty() && module->global_empty()) {
VLOG(2) << "Module '" << module->getName().str()
<< "' is empty. Skipping compilation.";
return std::vector<uint8_t>();
}
llvm::Triple default_target_triple("spir64-unknown-unknown");
std::unique_ptr<llvm::TargetMachine> target_machine =
SPIRGetTargetMachine(default_target_triple, gpu_version, debug_options);
TF_RETURN_IF_ERROR(LinkAndOptimizeModule(
module, gpu_version, debug_options, libdevice_dir_path,
SPIRTargetModuleLinker, default_target_triple, target_machine.get(),
kDefaultInlineThreshold));
TF_ASSIGN_OR_RETURN(spir,
EmitModuleToSpir(module, gpu_version, debug_options));
}
return std::vector<uint8_t>(spir.begin(), spir.end());
}
}
}
} | #include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include <utility>
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace se = ::stream_executor;
TEST(UtilsTest, TestGetSmName) {
se::CudaComputeCapability cc_hopper(9, 0);
ASSERT_EQ(nvptx::GetSmName(cc_hopper), "sm_90a");
se::CudaComputeCapability cc_next(10, 0);
ASSERT_EQ(nvptx::GetSmName(cc_next), "sm_90");
}
using VersionPair = std::pair<se::SemanticVersion, se::SemanticVersion>;
using PtxVersionFromCudaVersionTest = ::testing::TestWithParam<VersionPair>;
TEST_P(PtxVersionFromCudaVersionTest, VerifyMapping) {
EXPECT_EQ(nvptx::DetermineHighestSupportedPtxVersionFromCudaVersion(
GetParam().first),
GetParam().second);
}
INSTANTIATE_TEST_SUITE_P(VersionTest, PtxVersionFromCudaVersionTest,
::testing::ValuesIn<VersionPair>({
{{11, 0, 0}, {7, 0, 0}},
{{11, 1, 0}, {7, 1, 0}},
{{11, 2, 0}, {7, 2, 0}},
{{11, 3, 0}, {7, 3, 0}},
{{11, 4, 0}, {7, 4, 0}},
{{11, 5, 0}, {7, 5, 0}},
{{11, 6, 0}, {7, 6, 0}},
{{11, 7, 0}, {7, 7, 0}},
{{11, 8, 0}, {7, 8, 0}},
{{12, 0, 0}, {8, 0, 0}},
{{12, 1, 0}, {8, 1, 0}},
{{12, 2, 0}, {8, 2, 0}},
{{12, 3, 0}, {8, 3, 0}},
{{12, 4, 0}, {8, 4, 0}},
{{12, 5, 0}, {8, 5, 0}},
{{12, 6, 0}, {8, 5, 0}},
}),
[](::testing::TestParamInfo<VersionPair> data) {
se::SemanticVersion cuda_version = data.param.first;
return absl::StrCat("cuda_", cuda_version.major(),
"_", cuda_version.minor());
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43a8a8a6-4fb8-4471-9a04-067387bd6f07 | cpp | tensorflow/tensorflow | cpu_runtime | third_party/xla/xla/service/cpu/cpu_runtime.cc | third_party/xla/xla/service/cpu/cpu_runtime_test.cc | #include "xla/service/cpu/cpu_runtime.h"
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/layout_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/in_process_collectives.h"
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace cpu {
namespace runtime {
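// Returns the process-wide XfeedManager for `device_ordinal`, creating it on
// first use.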
XfeedManager* GetXfeedManager(int device_ordinal) {
static auto* managers = new absl::flat_hash_map<int, XfeedManager*>();
static absl::Mutex* mutex = new absl::Mutex();
absl::MutexLock lock(mutex);
auto it = managers->find(device_ordinal);
if (it == managers->end()) {
it = managers->emplace(device_ordinal, new XfeedManager()).first;
}
return it->second;
}
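// Resolves the device ordinal from the run options: 0 when no options are
// given, the explicit ordinal when set, otherwise the stream's device.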
int GetDeviceOrdinal(const xla::ExecutableRunOptions* run_options) {
if (!run_options) {
return 0;
} else if (run_options->device_ordinal() != -1) {
return run_options->device_ordinal();
}
return run_options->stream()->parent()->device_ordinal();
}
extern const char* const kEigenMatMulF16SymbolName =
"__xla_cpu_runtime_EigenMatMulF16";
extern const char* const kEigenMatMulF32SymbolName =
"__xla_cpu_runtime_EigenMatMulF32";
extern const char* const kEigenMatMulF64SymbolName =
"__xla_cpu_runtime_EigenMatMulF64";
extern const char* const kEigenMatMulC64SymbolName =
"__xla_cpu_runtime_EigenMatMulC64";
extern const char* const kEigenMatMulC128SymbolName =
"__xla_cpu_runtime_EigenMatMulC128";
extern const char* const kEigenMatMulS32SymbolName =
"__xla_cpu_runtime_EigenMatMulS32";
extern const char* const kEigenBatchMatMulF32SymbolName =
"__xla_cpu_runtime_EigenBatchMatMulF32";
extern const char* const kMKLConv2DF32SymbolName =
"__xla_cpu_runtime_MKLConv2DF32";
extern const char* const kACLConv2DF32SymbolName =
"__xla_cpu_runtime_ACLConv2DF32";
extern const char* const kACLMatMulF32SymbolName =
"__xla_cpu_runtime_ACLMatMulF32";
extern const char* const kACLBatchMatMulF32SymbolName =
"__xla_cpu_runtime_ACLBatchMatMulF32";
extern const char* const kEigenConv2DF16SymbolName =
"__xla_cpu_runtime_EigenConv2DF16";
extern const char* const kEigenConv2DF32SymbolName =
"__xla_cpu_runtime_EigenConv2DF32";
extern const char* const kEigenConv3DF16SymbolName =
"__xla_cpu_runtime_EigenConv3DF16";
extern const char* const kEigenConv3DF32SymbolName =
"__xla_cpu_runtime_EigenConv3DF32";
extern const char* const kDuccFftSymbolName = "__xla_cpu_runtime_DuccFft";
extern const char* const kDuccSingleThreadedFftSymbolName =
"__xla_cpu_runtime_DuccSingleThreadedFft";
extern const char* const kEigenSingleThreadedMatMulF8E4M3FNSymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF8E4M3FN";
extern const char* const kEigenSingleThreadedMatMulF8E5M2SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF8E5M2";
extern const char* const kEigenSingleThreadedMatMulF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF16";
extern const char* const kEigenSingleThreadedMatMulF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF32";
extern const char* const kEigenSingleThreadedMatMulF64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF64";
extern const char* const kEigenSingleThreadedMatMulC64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC64";
extern const char* const kEigenSingleThreadedMatMulC128SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC128";
extern const char* const kEigenSingleThreadedMatMulS32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulS32";
extern const char* const kEigenSingleThreadedMatMulU8SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulU8";
extern const char* const kEigenSingleThreadedConv2DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF16";
extern const char* const kEigenSingleThreadedConv2DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF32";
extern const char* const kEigenSingleThreadedConv3DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF16";
extern const char* const kEigenSingleThreadedConv3DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF32";
extern const char* const kAcquireInfeedBufferForDequeueSymbolName =
"__xla_cpu_runtime_AcquireInfeedBufferForDequeue";
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName =
"__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue";
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName =
"__xla_cpu_runtime_AcquireOutfeedBufferForPopulation";
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName =
"__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation";
extern const char* const kParallelForkJoinSymbolName =
"__xla_cpu_runtime_ParallelForkJoin";
extern const char* const kPrintfToStderrSymbolName =
"__xla_cpu_runtime_PrintfToStderr";
extern const char* const kStatusIsSuccessSymbolName =
"__xla_cpu_runtime_StatusIsSuccess";
extern const char* const kKeyValueSortSymbolName =
"__xla_cpu_runtime_KeyValueSort";
extern const char* const kTopKF32SymbolName = "__xla_cpu_runtime_TopKF32";
extern const char* const kTracingStartSymbolName =
"__xla_cpu_runtime_TracingStart";
extern const char* const kTracingEndSymbolName = "__xla_cpu_runtime_TracingEnd";
extern const char* const kXlaCpuRuntimeSymbolNamePrefix = "__xla_cpu_runtime_";
extern const char* const kAllReduceSymbolName = "__xla_cpu_runtime_AllReduce";
extern const char* const kAllGatherSymbolName = "__xla_cpu_runtime_AllGather";
extern const char* const kReduceScatterSymbolName =
"__xla_cpu_runtime_ReduceScatter";
extern const char* const kAllToAllSymbolName = "__xla_cpu_runtime_AllToAll";
extern const char* const kCollectivePermuteSymbolName =
"__xla_cpu_runtime_CollectivePermute";
extern const char* const kPartitionIdSymbolName =
"__xla_cpu_runtime_PartitionId";
extern const char* const kReplicaIdSymbolName = "__xla_cpu_runtime_ReplicaId";
extern const char* const kOneDnnMatMulSymbolName =
"__xla_cpu_runtime_OneDnnMatMul";
extern const char* const kOneDnnSoftmaxSymbolName =
"__xla_cpu_runtime_OneDnnSoftmax";
extern const char* const kOneDnnLayerNormSymbolName =
"__xla_cpu_runtime_OneDnnLayerNorm";
extern const char* const kOneDnnConvolutionSymbolName =
"__xla_cpu_runtime_OneDnnConvolution";
extern const char* const kOneDnnMatMulReorderSymbolName =
"__xla_cpu_runtime_OneDnnMatMulReorder";
extern const char* const kHandleFfiCallSymbolName =
"__xla_cpu_runtime_HandleFfiCall";
namespace {
absl::StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
int32_t size_bytes) {
ShapeProto shape_proto;
if (!shape_proto.ParseFromArray(shape_ptr, size_bytes)) {
return tsl::errors::Internal("Failed parsing the shape proto");
}
Shape shape(shape_proto);
auto status = ShapeUtil::ValidateShape(shape);
if (!status.ok()) {
return status;
}
return std::move(shape);
}
std::string ShapeString(const void* shape_ptr, int32_t shape_length) {
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
if (shape.ok()) {
return ShapeUtil::HumanStringWithLayout(shape.value());
}
return "<invalid shape>";
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireInfeedBufferForDequeueImpl(const ExecutableRunOptions* run_options,
int32_t buffer_length,
const void* shape,
int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireInfeedBufferForDequeue: "
<< ShapeString(shape, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->infeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program infeed request buffer size " << buffer_length
<< " did not match the runtime's infed buffer length " << buffer->length()
<< "; program reports desired shape: "
<< ShapeString(shape, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseInfeedBufferAfterDequeueImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseInfeedBufferAfterDeque: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->infeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireOutfeedBufferForPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireOutfeedBufferForPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->outfeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program outfeed request buffer size " << buffer_length
<< " did not match the runtime's outfeed buffer length "
<< buffer->length() << "; program reports outfed shape: "
<< ShapeString(shape_ptr, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseOutfeedBufferAfterPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseOutfeedBufferAfterPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->outfeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReplicaIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
int32_t replica_id = run_options->device_assignment()
->ReplicaIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &replica_id, 4);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void PartitionIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()
->LogicalIdForDevice(GlobalDeviceId(device_ordinal))
.value();
std::memcpy(output_buffer, &logical_id.computation_id, 4);
}
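// Builds the rendezvous key identifying the set of devices that participate
// in a collective operation for this run.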
RendezvousKey GetRendezvousKey(const ExecutableRunOptions* run_options,
GlobalDeviceId device,
std::vector<ReplicaGroup> group,
int32_t channel_id_present,
std::optional<bool> use_global_device_ids,
int64_t op_id) {
const DeviceAssignment& device_assignment = *run_options->device_assignment();
RendezvousKey::CollectiveOpKind op_kind = channel_id_present
? RendezvousKey::kCrossModule
: RendezvousKey::kCrossReplica;
std::vector<GlobalDeviceId> participating_devices =
GetParticipatingDevices(GlobalDeviceId(device), device_assignment, group,
GetCollectiveOpGroupMode(channel_id_present != 0,
use_global_device_ids)
.value())
.value();
int num_local_participants = participating_devices.size();
return RendezvousKey{run_options->run_id(), std::move(participating_devices),
num_local_participants, op_kind, op_id};
}
CollectivesInterface* GetInProcessCollectivesImpl() {
static InProcessCollectives* c = new InProcessCollectives();
return c;
}
CollectivesInterface* GetCollectivesImpl(
const ExecutableRunOptions* run_options) {
if (run_options->cpu_executable_run_options() &&
run_options->cpu_executable_run_options()->collectives()) {
return run_options->cpu_executable_run_options()->collectives();
}
return GetInProcessCollectivesImpl();
}
absl::Duration DefaultCollectiveTimeout() { return absl::Minutes(30); }
absl::StatusOr<int> RankInGlobalDevices(
absl::Span<GlobalDeviceId const> devices, GlobalDeviceId device) {
auto it = absl::c_find(devices, device);
if (it == devices.end()) {
return InvalidArgument(
"Device %d not present in global devices %s.", device.value(),
absl::StrJoin(devices, ", ", [](std::string* out, GlobalDeviceId id) {
absl::StrAppend(out, id.value());
}));
}
return std::distance(devices.begin(), it);
}
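// Implementations of the CPU collective runtime entry points. Each parses the
// serialized replica groups, resolves the caller's rank among the
// participating devices, and dispatches to the CollectivesInterface from the
// run options (falling back to the in-process implementation).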
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllToAllImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers,
int64_t buffer_size, void** source_buffers,
void** destination_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(source_buffers,
sizeof(void*) * num_buffers);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(destination_buffers,
sizeof(void*) * num_buffers);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllToAll(
rendezvous_key, buffer_size,
absl::Span<const void* const>(source_buffers, num_buffers),
absl::Span<void* const>(destination_buffers, num_buffers),
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllGatherImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int32_t use_global_device_ids,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int64_t buffer_size,
void* source_buffer, void* destination_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllGather(rendezvous_key, buffer_size,
source_buffer, destination_buffer,
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReduceScatterImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type,
int64_t chunk_elems, void* input_buffer,
void* output_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->ReduceScatter(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
static_cast<PrimitiveType>(element_type), chunk_elems, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllReduceImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
auto shape_str = ShapeString(shape_ptr, shape_length);
VLOG(2) << "All-reduce input/output shape : " << shape_str;
Shape shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length).value();
CHECK((num_buffers > 1 && shape.IsTuple()) ||
(num_buffers == 1 && LayoutUtil::IsDenseArray(shape)));
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
for (int i = 0; i < num_buffers; i++) {
Shape subshape = num_buffers == 1 ? shape : shape.tuple_shapes(i);
TF_CHECK_OK(communicator->AllReduce(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
subshape.element_type(), ShapeUtil::ElementsIn(subshape),
input_buffers[i], output_buffers[i], DefaultCollectiveTimeout()));
}
}
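// Parses the serialized "from=to" source/target pairs to find who sends to
// this device and whom this device copies to, then performs the permute.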
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void CollectivePermuteImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
int32_t byte_size, void* input_buffer,
void* output_buffer, const void* source_target_pairs,
int32_t source_target_pairs_size) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view source_target_pairs_serialized(
static_cast<const char*>(source_target_pairs), source_target_pairs_size);
auto pairs = absl::StrSplit(source_target_pairs_serialized, ',');
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()->LogicalIdForDevice(device).value();
int32_t logical_device_id =
channel_id_present ? logical_id.computation_id : logical_id.replica_id;
std::optional<int> source_replica_id;
std::vector<int> copy_to;
for (auto& p : pairs) {
std::vector<std::string> mapping = absl::StrSplit(p, '=');
CHECK_EQ(mapping.size(), 2);
int from = std::stoi(mapping[0]);
int to = std::stoi(mapping[1]);
if (from == logical_device_id) {
copy_to.push_back(to);
}
if (to == logical_device_id) {
CHECK(!source_replica_id.has_value());
source_replica_id = from;
}
}
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, {}, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->CollectivePermute(
rendezvous_key, byte_size, source_replica_id, copy_to, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
}
}
}
}
extern "C" {
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int __xla_cpu_runtime_PrintfToStderr(
const char* format, ...) {
VLOG(3) << "__xla_cpu_runtime_PrintfToStderr " << format;
va_list args;
va_start(args, format);
int result = vfprintf(stderr, format, args);
va_end(args);
return result;
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int64_t __xla_cpu_runtime_TracingStart(
const void* /*run_options, unused*/, const char* name,
const char* hlo_module, int64_t program_id) {
VLOG(3) << "TracingStart " << name;
auto trace_in =
tsl::profiler::TraceMeEncode(name, {{"hlo_op", name},
{"hlo_module", hlo_module},
{"program_id", program_id}});
return tsl::profiler::TraceMe::ActivityStart(trace_in);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TracingEnd(
const void* /*run_options, unused*/, int64_t id) {
VLOG(3) << "TracingEnd " << id;
tsl::profiler::TraceMe::ActivityEnd(id);
}
void* __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape, int32_t shape_length) {
return xla::cpu::runtime::AcquireInfeedBufferForDequeueImpl(
run_options, buffer_length, shape, shape_length);
}
void __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::ReleaseInfeedBufferAfterDequeueImpl(
run_options, buffer_length, buffer_ptr, shape_ptr, shape_length);
}
void* __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::AcquireOutfeedBufferForPopulationImpl(
run_options, buffer_length, shape_ptr, shape_length);
}
void __xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
return xla::cpu::runtime::ReleaseOutfeedBufferAfterPopulationImpl(
run_options, buffer_length, buffer_ptr, shape_ptr, shape_length);
}
void __xla_cpu_runtime_AllToAll(const xla::ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t num_buffers, int64_t buffer_size,
void** source_buffers,
void** destination_buffers) {
return xla::cpu::runtime::AllToAllImpl(
run_options, channel_id_present, op_id, replica_groups_str,
replica_groups_str_size, num_buffers, buffer_size, source_buffers,
destination_buffers);
}
void __xla_cpu_runtime_AllGather(const xla::ExecutableRunOptions* run_options,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int64_t buffer_size, void* source_buffer,
void* destination_buffer) {
return xla::cpu::runtime::AllGatherImpl(
run_options, channel_id_present, use_global_device_ids, op_id,
replica_groups_str, replica_groups_str_size, buffer_size, source_buffer,
destination_buffer);
}
void __xla_cpu_runtime_ReduceScatter(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type, int64_t chunk_elems,
void* input_buffer, void* output_buffer) {
return xla::cpu::runtime::ReduceScatterImpl(
run_options, replica_groups_str, replica_groups_str_size,
channel_id_present, use_global_device_ids, op_id, reduction_kind,
element_type, chunk_elems, input_buffer, output_buffer);
}
void __xla_cpu_runtime_AllReduce(const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
return xla::cpu::runtime::AllReduceImpl(
run_options, replica_groups_str, replica_groups_str_size,
channel_id_present, use_global_device_ids, op_id, reduction_kind,
shape_ptr, shape_length, num_buffers, input_buffers, output_buffers);
}
void __xla_cpu_runtime_ReplicaId(const xla::ExecutableRunOptions* run_options,
void* output_buffer) {
return xla::cpu::runtime::ReplicaIdImpl(run_options, output_buffer);
}
void __xla_cpu_runtime_PartitionId(const xla::ExecutableRunOptions* run_options,
void* output_buffer) {
return xla::cpu::runtime::PartitionIdImpl(run_options, output_buffer);
}
void __xla_cpu_runtime_CollectivePermute(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, int32_t byte_size, void* input_buffer, void* output_buffer,
const void* source_target_pairs, int32_t source_target_pairs_size) {
return xla::cpu::runtime::CollectivePermuteImpl(
run_options, channel_id_present, op_id, byte_size, input_buffer,
output_buffer, source_target_pairs, source_target_pairs_size);
}
} | #define EIGEN_USE_THREADS
#include "xla/service/cpu/cpu_runtime.h"
#include <memory>
#include <string>
#include <tuple>
#include "absl/strings/str_format.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/array2d.h"
#include "xla/client/local_client.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/runtime_custom_call_status.h"
#include "xla/service/cpu/runtime_matmul.h"
#include "xla/service/cpu/runtime_matmul_acl.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/types.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class CpuRuntimeTest : public ::testing::Test {};
template <typename T>
std::unique_ptr<Array2D<float>> MaybeTransposeArray2D(const Array2D<T>& array,
bool transpose) {
int64_t output_height = array.height();
int64_t output_width = array.width();
if (transpose) {
std::swap(output_width, output_height);
}
auto output = std::make_unique<Array2D<float>>(output_height, output_width);
for (int y = 0; y < array.height(); y++) {
for (int x = 0; x < array.width(); x++) {
if (transpose) {
(*output)(x, y) = array(y, x);
} else {
(*output)(y, x) = array(y, x);
}
}
}
return output;
}
void CheckMatrixMultiply(const Array2D<float>& a, const Array2D<float>& b,
const Array2D<float>& c) {
for (int i = 0; i < a.height(); ++i) {
for (int j = 0; j < b.width(); ++j) {
float sum = 0.0;
for (int k = 0; k < a.width(); ++k) {
sum += a(i, k) * b(k, j);
}
EXPECT_NEAR(sum, c(i, j), 0.01);
}
}
}
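// Computes a*b through the __xla_cpu_runtime_EigenMatMulF32 entry points
// (single- or multi-threaded), handling the layout transposes the runtime
// expects and returning the product in the original orientation.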
std::unique_ptr<Array2D<float>> EigenMatrixMultiply(const Array2D<float>& a,
const Array2D<float>& b,
bool transpose_lhs,
bool transpose_rhs,
bool single_threaded) {
CHECK_EQ(a.width(), b.height());
int64_t m = a.height();
int64_t n = b.width();
int64_t k = a.width();
auto a_transpose = MaybeTransposeArray2D(a, !transpose_lhs);
auto b_transpose = MaybeTransposeArray2D(b, !transpose_rhs);
auto c_transpose = std::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
} else {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "XLAEigen", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
ExecutableRunOptions run_options;
run_options.set_intra_op_thread_pool(&device);
__xla_cpu_runtime_EigenMatMulF32(&run_options, c_transpose->data(),
a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
}
return MaybeTransposeArray2D(*c_transpose, true);
}
struct MatMulShape {
int64_t m;
int64_t k;
int64_t n;
};
MatMulShape MatMulShapes[] = {
MatMulShape{2, 2, 3}, MatMulShape{256, 512, 1024},
MatMulShape{128, 128, 1}, MatMulShape{1, 128, 128},
MatMulShape{1, 32, 128}, MatMulShape{1, 32, 16},
MatMulShape{32, 16, 1}, MatMulShape{32, 128, 1},
};
using MatMulTestParam = std::tuple<MatMulShape, bool, bool, bool>;
class EigenMatMulTest : public CpuRuntimeTest,
public ::testing::WithParamInterface<MatMulTestParam> {
public:
static std::string Name(
const ::testing::TestParamInfo<MatMulTestParam>& info) {
MatMulShape shape = std::get<0>(info.param);
bool transpose_lhs = std::get<1>(info.param);
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};
TEST_P(EigenMatMulTest, DoIt) {
MatMulShape shape = std::get<0>(GetParam());
bool transpose_lhs = std::get<1>(GetParam());
bool transpose_rhs = std::get<2>(GetParam());
bool single_threaded = std::get<3>(GetParam());
auto a = MakeLinspaceArray2D(0.0, 1.0, shape.m, shape.k);
auto b = MakeLinspaceArray2D(-2.0, 2.0, shape.k, shape.n);
auto c = EigenMatrixMultiply(*a, *b, transpose_lhs, transpose_rhs,
single_threaded);
CheckMatrixMultiply(*a, *b, *c);
}
INSTANTIATE_TEST_SUITE_P(EigenMatMulTestInstantiaion, EigenMatMulTest,
::testing::Combine(::testing::ValuesIn(MatMulShapes),
::testing::Bool(),
::testing::Bool(),
::testing::Bool()),
EigenMatMulTest::Name);
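// A minimal sketch of driving the single-threaded matmul path directly on a
// 1x1 problem; it assumes only the helpers defined above in this file.
TEST_F(CpuRuntimeTest, SingleThreadedMatMulTinyExample) {
  Array2D<float> a({{2.0f}});
  Array2D<float> b({{3.0f}});
  auto c = EigenMatrixMultiply(a, b, /*transpose_lhs=*/false,
                               /*transpose_rhs=*/false,
                               /*single_threaded=*/true);
  CheckMatrixMultiply(a, b, *c);
}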
TEST_F(CpuRuntimeTest, SuccessStatus) {
XlaCustomCallStatus success_status;
ASSERT_TRUE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
TEST_F(CpuRuntimeTest, FailureStatus) {
  XlaCustomCallStatus failure_status;
  XlaCustomCallStatusSetFailure(&failure_status, "Failed", 6);
  ASSERT_FALSE(__xla_cpu_runtime_StatusIsSuccess(&failure_status));
}
TEST_F(CpuRuntimeTest, GetDeviceOrdinalWhenRunOptionsEmpty) {
EXPECT_EQ(cpu::runtime::GetDeviceOrdinal(nullptr), 0);
}
TEST_F(CpuRuntimeTest, GetDeviceOrdinalWhenSetInRunOptions) {
ExecutableRunOptions run_options;
ASSERT_EQ(run_options.device_ordinal(), -1);
run_options.set_device_ordinal(3);
EXPECT_EQ(cpu::runtime::GetDeviceOrdinal(&run_options), 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a93ebaaa-58e6-4f7b-8f29-a489e31d4189 | cpp | tensorflow/tensorflow | ir_emitter2 | third_party/xla/xla/service/cpu/ir_emitter2.cc | third_party/xla/xla/service/cpu/ir_emitter2_test.cc | #include "xla/service/cpu/ir_emitter2.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CodeGen.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/elemental_math_emitter.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/cpu/parallel_loop_emitter.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::cpu {
namespace {
static llvm::StructType* Dim3StructTy(llvm::LLVMContext& ctx,
std::string_view name) {
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create(name, i64, i64, i64);
}
static llvm::StructType* KernelThreadDimTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThreadDim");
}
static llvm::StructType* KernelThreadTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThread");
}
static llvm::StructType* KernelArgTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelArg", ptr, i64);
}
static llvm::StructType* KernelCallFrameTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelCallFrame", ptr, ptr, i64,
ptr);
}
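// For orientation, the LLVM struct types above are intended to line up with a
// host-side call frame of roughly this shape (a sketch only; the authoritative
// layout lives in the host kernel runtime, and the field names here are
// illustrative):
//
//   struct SE_HOST_KernelThreadDim { uint64_t x, y, z; };
//   struct SE_HOST_KernelThread    { uint64_t x, y, z; };
//   struct SE_HOST_KernelArg       { void* data; uint64_t size; };
//   struct SE_HOST_KernelCallFrame {
//     const SE_HOST_KernelThreadDim* thread_dims;  // struct GEP index 0
//     const SE_HOST_KernelThread* thread;          // struct GEP index 1
//     uint64_t num_args;                           // struct GEP index 2
//     const SE_HOST_KernelArg* args;               // struct GEP index 3
//   };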
static llvm::FunctionType* KernelFunctionTy(llvm::LLVMContext& ctx) {
return llvm::FunctionType::get(llvm::PointerType::getUnqual(ctx),
llvm::PointerType::getUnqual(ctx),
false);
}
}
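// Emits elemental IR for individual HLO ops. XLA-specific math (atan2, tanh,
// erf) is routed to the CPU-specific emitters, and nested computations are
// emitted through the nested IrEmitter so they can be invoked as thread-local
// calls.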
class IrEmitter2::ElementalIrEmitter : public xla::ElementalIrEmitter {
public:
ElementalIrEmitter(llvm::Module* module, llvm::IRBuilder<>* b,
const HloModule* hlo_module, IrEmitter* nested_ir_emitter,
bool fast_min_max)
: xla::ElementalIrEmitter(
module, b,
Options{true}),
hlo_module_(hlo_module),
nested_ir_emitter_(nested_ir_emitter),
fast_min_max_(fast_min_max) {}
protected:
absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
llvm::Value* lhs, llvm::Value* rhs,
absl::string_view) override {
return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);
}
absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitTanh(module(), *b(), prim_type, value);
}
absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitErf(module(), *b(), prim_type, value);
}
absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer) override {
if (!hlo_module_ || !hlo_module_->has_schedule()) {
return absl::InternalError(
"HLO module must be scheduled to emit thread local computation.");
}
auto emit_computation = [&](const HloComputation* computation) {
if (!nested_ir_emitter_->is_computation_emitted(*computation,
is_reducer)) {
VLOG(2) << "Emit nested computation: " << computation->name();
TF_RETURN_IF_ERROR(
nested_ir_emitter_
->EmitComputation(
const_cast<HloComputation*>(computation), name, false,
hlo_module_->schedule()
.sequence(computation)
.instructions(),
is_reducer,
{llvm::Attribute::AlwaysInline})
.status());
}
return absl::OkStatus();
};
for (HloComputation* embedded : callee.MakeEmbeddedComputationsList()) {
if (embedded->IsFusionComputation()) continue;
TF_RETURN_IF_ERROR(emit_computation(embedded));
}
TF_RETURN_IF_ERROR(emit_computation(&callee));
VLOG(2) << "Emit thread local call to: " << callee.name();
nested_ir_emitter_->b()->SetInsertPoint(b()->GetInsertPoint());
auto values = nested_ir_emitter_->EmitThreadLocalCall(
callee, parameters, name, is_reducer, false);
return values;
}
bool fast_min_max() override { return fast_min_max_; }
private:
const HloModule* hlo_module_;
IrEmitter* nested_ir_emitter_;
bool fast_min_max_;
};
IrEmitter2::IrEmitter2(const HloModule& hlo_module, llvm::Module* module,
IrEmitter* nested_ir_emitter)
: hlo_module_(hlo_module),
module_(module),
nested_ir_emitter_(nested_ir_emitter),
call_frame_ty_(KernelCallFrameTy(module_->getContext())),
thread_dims_ty_(KernelThreadDimTy(module_->getContext())),
thread_ty_(KernelThreadTy(module_->getContext())),
arg_ty_(KernelArgTy(module_->getContext())) {}
bool IrEmitter2::fast_min_max() const {
return hlo_module_.config().debug_options().xla_cpu_enable_fast_min_max();
}
IrEmitter2::KernelInfo::KernelInfo(KernelPrototype prototype,
const se::BlockDim& block_dims,
const se::ThreadDim& thread_dims)
: name(prototype.function->getName().str()),
block_dims(block_dims),
thread_dims(thread_dims),
invariant_arguments(std::move(prototype.invariant_arguments)) {}
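// Emits a host kernel that computes `instr` element by element: operands are
// read through the kernel prototype's argument arrays, and the loop nest comes
// from EmitElementalLoops (parallelized across thread.x when a parallel config
// is attached to the instruction).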
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitElementalHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit elemental host kernel: " << instr->name();
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
const HloInstruction* operand = instr->operand(i);
operand_to_generator[operand] = [&, i](const llvm_ir::IrArray::Index& idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
};
}
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
llvm_ir::ElementGenerator element_generator =
elemental_emitter.MakeElementGenerator(instr, operand_to_generator);
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, instr, kernel_prototype, element_generator));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), thread_dims));
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitPadHostKernel(
const HloInstruction* pad) {
VLOG(2) << "Emit Pad host kernel.";
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(pad));
llvm_ir::IrArray operand_array = kernel_prototype.arguments[0];
llvm_ir::IrArray padvalue_array = kernel_prototype.arguments[1];
llvm_ir::IrArray output_array = kernel_prototype.results[0];
llvm::LLVMContext& ctx = module_->getContext();
llvm::IRBuilder<> b(ctx);
auto builder_overwrite = nested_ir_emitter_->WithBuilder(b);
nested_ir_emitter_->PushComputeFunction(
&b, module_,
0, kernel_prototype.function,
nullptr, kernel_prototype.return_block);
TF_RETURN_IF_ERROR(nested_ir_emitter_->HandlePad(
const_cast<HloInstruction*>(pad), operand_array, padvalue_array,
output_array));
nested_ir_emitter_->PopComputeFunction();
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), se::ThreadDim()));
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit fusion host kernel: " << fusion->name();
if (fusion->fusion_kind() == HloInstruction::FusionKind::kOutput) {
return EmitDotFusionHostKernel(fusion);
}
if (fusion->fusion_kind() != HloInstruction::FusionKind::kLoop) {
return Internal("Unsupported loop fusion kind for instruction: %s",
fusion->ToString());
}
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(fusion));
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fusion->operand_count(); i++) {
fused_emitter.BindGenerator(
*fusion->fused_parameter(i), [&, i](llvm_ir::IrArray::Index idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
});
}
if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion),
nested_ir_emitter_->assignment())) {
TF_RETURN_IF_ERROR(llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion), kernel_prototype.results[0],
&fused_emitter, &b));
return kernels_.emplace_back(KernelInfo(std::move(kernel_prototype),
se::BlockDim(), se::ThreadDim()));
}
TF_ASSIGN_OR_RETURN(
auto element_generator,
fused_emitter.GetGenerator(*fusion->fused_expression_root()));
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, fusion, kernel_prototype, element_generator));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), thread_dims));
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitReductionHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit reduction host kernel: " << instr->name();
return EmitElementalHostKernel(instr);
}
static bool IsDotCodegenStrategy(DotImplementationStrategy strategy) {
static std::array<DotImplementationStrategy, 3> kDotCodegenStrategies = {
DotImplementationStrategy::kNaiveLlvmIr,
DotImplementationStrategy::kTiledLlvmIrGemm,
DotImplementationStrategy::kTiledLlvmIrGemv,
};
return absl::c_find(kDotCodegenStrategies, strategy) !=
kDotCodegenStrategies.end();
}
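// Returns true if the dot should be emitted inline as LLVM IR (naive or tiled
// gemm/gemv) rather than lowered to a runtime library call.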
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit dot host kernel: " << instr->name();
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *instr,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[0];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[1];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*instr, target_array, lhs_array, rhs_array,
nullptr, nullptr, &b,
hlo_module_.config(), nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), se::ThreadDim()));
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitConcatenateHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit concatenate host kernel: " << instr->name();
auto fast_impl_reason = CanDoFastConcatenate(instr);
if (fast_impl_reason.ok()) {
VLOG(1) << "Emitting fast concatenate for " << instr->ToString() << ": "
<< fast_impl_reason.message();
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm::IRBuilder<> ir_builder(module_->getContext());
ir_builder.SetInsertPoint(
kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray output_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(::xla::cpu::EmitFastConcatenate(
instr, kernel_prototype.arguments, output_array, module_, ir_builder));
return kernels_.emplace_back(KernelInfo(std::move(kernel_prototype),
se::BlockDim(), se::ThreadDim()));
}
VLOG(1) << "Could not emit fast concatenate for " << instr->ToString() << ": "
<< fast_impl_reason.message();
return EmitElementalHostKernel(instr);
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit dot fusion host kernel: " << fusion->name();
const HloInstruction* add = fusion->fused_expression_root();
if (add->opcode() != HloOpcode::kAdd) {
return Internal("Dot fusion supports only `add` root instruction");
}
bool is_dot_operand0 = add->operand(0)->opcode() == HloOpcode::kDot;
bool is_dot_operand1 = add->operand(1)->opcode() == HloOpcode::kDot;
if (is_dot_operand0 == is_dot_operand1) {
return Internal("Dot fusion root instruction must have single dot operand");
}
int64_t dot_op_index = is_dot_operand0 ? 0 : 1;
int64_t addend_op_index = 1 - dot_op_index;
const HloInstruction* dot = add->operand(dot_op_index);
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *dot,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
int64_t dot_lhs_pnum = dot->operand(0)->parameter_number();
int64_t dot_rhs_pnum = dot->operand(1)->parameter_number();
int64_t addend_pnum = add->operand(addend_op_index)->parameter_number();
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(fusion));
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[dot_lhs_pnum];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[dot_rhs_pnum];
llvm_ir::IrArray addend_array = kernel_prototype.arguments[addend_pnum];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*dot, target_array, lhs_array, rhs_array, &addend_array,
nullptr, &b, hlo_module_.config(),
nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), se::ThreadDim()));
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitSliceToDynamicHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit slice-to-dynamic host kernel: " << instr->name();
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm::IRBuilder<> ir_builder(module_->getContext());
ir_builder.SetInsertPoint(
kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray output_array = kernel_prototype.results[0];
auto guard = nested_ir_emitter_->WithBuilder(ir_builder);
TF_RETURN_IF_ERROR(nested_ir_emitter_->EmitSliceToDynamic(
instr, kernel_prototype.arguments, output_array));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), se::ThreadDim()));
}
absl::StatusOr<IrEmitter2::KernelInfo>
IrEmitter2::EmitSelectAndScatterHostKernel(const HloInstruction* instr) {
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm_ir::IrArray operand_array = kernel_prototype.arguments[0];
llvm_ir::IrArray source_array = kernel_prototype.arguments[1];
llvm_ir::IrArray output_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(nested_ir_emitter_->HandleSelectAndScatter(
const_cast<HloInstruction*>(instr), operand_array, source_array,
output_array));
return kernels_.emplace_back(
KernelInfo(std::move(kernel_prototype), se::BlockDim(), se::ThreadDim()));
}
absl::StatusOr<IrEmitter2::KernelInfo>
IrEmitter2::EmitDynamicUpdateSliceHostKernel(const HloInstruction* instr) {
if (llvm_ir::CanUpdateDynamicSliceInPlace(const_cast<HloInstruction*>(instr),
nested_ir_emitter_->assignment())) {
VLOG(2) << "Emit in-place dynamic-update-slice kernel: " << instr->name();
TF_ASSIGN_OR_RETURN(KernelPrototype kernel_prototype,
EmitKernelPrototype(instr));
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(
kernel_prototype.function->getEntryBlock().getTerminator());
TF_RETURN_IF_ERROR(llvm_ir::EmitDynamicUpdateSliceInPlace(
kernel_prototype.arguments, kernel_prototype.results.front(),
llvm_ir::IrName(instr, "in_place"), &b));
return kernels_.emplace_back(KernelInfo(std::move(kernel_prototype),
se::BlockDim(), se::ThreadDim()));
}
return EmitElementalHostKernel(instr);
}
absl::StatusOr<IrEmitter2::ComparatorInfo> IrEmitter2::EmitSortComparator(
const HloInstruction* instr) {
HloComputation* comparator = instr->to_apply();
auto info = absl::c_find_if(comparators_, [&](const ComparatorInfo& info) {
return info.name == comparator->name();
});
if (info != comparators_.end()) return *info;
auto schedule = comparator->MakeInstructionPostOrder();
TF_ASSIGN_OR_RETURN(llvm::Function * comparator_function,
nested_ir_emitter_->EmitComputation(
comparator, comparator->name(),
true, schedule,
false));
comparator_function->setUWTableKind(llvm::UWTableKind::Default);
return comparators_.emplace_back(
ComparatorInfo{comparator_function->getName().str()});
}
absl::StatusOr<BufferAllocation::Slice> IrEmitter2::GetAllocationSlice(
const HloInstruction* instruction, const ShapeIndex& index) {
return nested_ir_emitter_->assignment().GetUniqueSlice(instruction, index);
}
absl::StatusOr<std::vector<IrEmitter2::KernelParameter>>
IrEmitter2::GetKernelArgumentsParameters(const HloInstruction* instruction) {
std::vector<KernelParameter> arguments;
for (HloInstruction* operand : instruction->operands()) {
for (auto& indexed : ShapeUtil::GetLeafShapes(operand->shape())) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
GetAllocationSlice(operand, indexed.index));
arguments.push_back(KernelParameter{indexed.shape, slice});
}
}
return arguments;
}
absl::StatusOr<std::vector<IrEmitter2::KernelParameter>>
IrEmitter2::GetKernelResultsParameters(const HloInstruction* instruction) {
std::vector<KernelParameter> results;
for (auto& indexed : ShapeUtil::GetLeafShapes(instruction->shape())) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
GetAllocationSlice(instruction, indexed.index));
results.push_back(KernelParameter{indexed.shape, slice});
}
return results;
}
absl::Status IrEmitter2::VerifyKernelParameters(
absl::Span<const KernelParameter> arguments,
absl::Span<const KernelParameter> results) {
for (size_t i = 0; i < arguments.size(); ++i) {
for (size_t j = i + 1; j < arguments.size(); ++j) {
const KernelParameter& a = arguments[i];
const KernelParameter& b = arguments[j];
if (a.slice != b.slice && a.slice.OverlapsWith(b.slice)) {
return Internal(
"Kernel arguments must not overlap: result #%d (%s) overlaps "
"with result #%d (%s)",
i, a.slice.ToString(), j, b.slice.ToString());
}
}
}
for (size_t i = 0; i < results.size(); ++i) {
for (size_t j = i + 1; j < results.size(); ++j) {
const KernelParameter& a = results[i];
const KernelParameter& b = results[j];
if (a.slice.OverlapsWith(b.slice)) {
return Internal(
"Kernel results must not overlap: result #%d (%s) overlaps "
"with result #%d (%s)",
i, a.slice.ToString(), j, b.slice.ToString());
}
}
}
for (size_t i = 0; i < results.size(); ++i) {
for (size_t j = 0; j < arguments.size(); ++j) {
const KernelParameter& result = results[i];
const KernelParameter& argument = arguments[j];
if (result.slice.OverlapsWith(argument.slice) &&
result.slice != argument.slice) {
return Internal(
"Kernel results must not partially overlap with arguments: result "
"#%d (%s) overlaps with argument #%d (%s)",
i, result.slice.ToString(), j, argument.slice.ToString());
break;
}
}
}
return absl::OkStatus();
}
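// Loads the kernel launch dimensions from the call frame: struct field 0 holds
// the thread-dims pointer and field 1 the current thread id, each an {x, y, z}
// triple of i64.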
IrEmitter2::KernelThreadDims IrEmitter2::EmitKernelThreadDims(
llvm::IRBuilder<>& b, llvm::Value* call_frame) {
auto* td_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 0, "tdims_gep");
auto* tdims = b.CreateLoad(b.getPtrTy(), td_gep, "tdims");
auto* x_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 0, "tdim_x_gep");
auto* y_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 1, "tdim_y_gep");
auto* z_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 2, "tdim_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tdim_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tdim_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tdim_z")};
}
IrEmitter2::KernelThread IrEmitter2::EmitKernelThread(llvm::IRBuilder<>& b,
llvm::Value* call_frame) {
auto* t_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 1, "tid_gep");
auto* tids = b.CreateLoad(b.getPtrTy(), t_gep, "tids");
auto* x_gep = b.CreateStructGEP(thread_ty_, tids, 0, "tid_x_gep");
auto* y_gep = b.CreateStructGEP(thread_ty_, tids, 1, "tid_y_gep");
auto* z_gep = b.CreateStructGEP(thread_ty_, tids, 2, "tid_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tid_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tid_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tid_z")};
}
llvm_ir::IrArray IrEmitter2::EmitKernelArgument(llvm::IRBuilder<>& b,
llvm::Value* call_frame,
int64_t index,
const Shape& shape) {
llvm::Type* ptr = llvm::PointerType::get(b.getContext(), 0);
std::string name = absl::StrCat("arg", index);
auto* args_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 3, "args_gep");
auto* args = b.CreateLoad(ptr, args_gep, "args");
auto* data_gep = b.CreateConstGEP2_32(arg_ty_, args, index, 0, name + "_gep");
auto* data = b.CreateLoad(ptr, data_gep, name);
llvm_ir::SetAlignmentMetadataForLoad(data, cpu_function_runtime::MinAlign());
IrEmitter::AttachDereferenceableMetadataForLoad(data, ByteSizeOf(shape));
AttachInvariantLoadMetadataForLoad(data);
return llvm_ir::IrArray(data, llvm_ir::ShapeToIrType(shape, module_), shape);
}
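// Builds the host kernel function: creates the entry block, loads thread dims
// and thread id from the call frame, materializes an IrArray per argument and
// result, attaches alias-scope/noalias metadata derived from buffer slices,
// and marks arguments that are not also results as invariant.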
absl::StatusOr<IrEmitter2::KernelPrototype> IrEmitter2::EmitKernelPrototype(
std::string_view name, absl::Span<const KernelParameter> arguments,
absl::Span<const KernelParameter> results) {
VLOG(3) << "Emit kernel prototype: " << name
<< ", #arguments=" << arguments.size()
<< ", #results=" << results.size();
for (const KernelParameter& argument : arguments) {
VLOG(3) << " argument: " << argument.shape.ToString(true) << " in "
<< argument.slice.ToString();
}
for (const KernelParameter& result : results) {
VLOG(3) << " result: " << result.shape.ToString(true) << " in "
<< result.slice.ToString();
}
TF_RETURN_IF_ERROR(VerifyKernelParameters(arguments, results));
llvm::LLVMContext& ctx = module_->getContext();
llvm::MDBuilder mb(ctx);
llvm::IRBuilder<> b(ctx);
llvm::MDNode* domain = mb.createAliasScopeDomain(
absl::StrFormat("XLA host kernel %s AA domain", name));
absl::btree_map<BufferAllocation::Slice, llvm::MDNode*> alias_scopes;
for (const KernelParameter& result : results) {
if (result.slice.allocation()->is_parameter_aliased_with_output()) {
continue;
}
alias_scopes[result.slice] = mb.createAliasScope(
absl::StrFormat("result slice: %s", result.slice.ToString()), domain);
}
auto get_alias_scope = [&](BufferAllocation::Slice slice) -> llvm::MDNode* {
auto it = alias_scopes.find(slice);
return it == alias_scopes.end() ? nullptr
: llvm::MDNode::get(ctx, it->second);
};
auto get_noalias = [&](BufferAllocation::Slice slice) -> llvm::MDNode* {
llvm::SmallVector<llvm::Metadata*> scopes;
for (const auto& [alias_slice, alias_scope] : alias_scopes) {
if (!slice.OverlapsWith(alias_slice)) {
scopes.push_back(alias_scope);
}
}
return scopes.empty() ? nullptr : llvm::MDNode::get(ctx, scopes);
};
absl::flat_hash_set<BufferAllocation::Slice> result_slices;
result_slices.reserve(results.size());
for (const KernelParameter& result : results) {
result_slices.insert(result.slice);
}
llvm::Function* function = llvm::Function::Create(
KernelFunctionTy(ctx), llvm::GlobalValue::ExternalLinkage, name, module_);
function->setCallingConv(llvm::CallingConv::C);
function->setUWTableKind(llvm::UWTableKind::Default);
const DebugOptions& debug_options = hlo_module_.config().debug_options();
function->addFnAttr(
"prefer-vector-width",
absl::StrCat(debug_options.xla_cpu_prefer_vector_width()));
function->addFnAttr("frame-pointer", "all");
b.SetInsertPoint(llvm::BasicBlock::Create(ctx, "", function));
llvm::Value* call_frame = function->getArg(0);
KernelThreadDims kernel_thread_dims = EmitKernelThreadDims(b, call_frame);
KernelThread kernel_thread = EmitKernelThread(b, call_frame);
int64_t idx = 0;
absl::flat_hash_set<int64_t> invariant_arguments;
std::vector<llvm_ir::IrArray> ir_arguments;
for (int64_t i = 0; i < arguments.size(); ++i) {
const KernelParameter& argument = arguments[i];
auto ir_argument = EmitKernelArgument(b, call_frame, idx++, argument.shape);
if (auto* noalias = get_noalias(argument.slice)) {
ir_argument.AddNoaliasMetadata(noalias);
}
if (!result_slices.contains(argument.slice)) {
ir_argument.MarkInvariantOverWholeProgram(&ctx);
invariant_arguments.insert(i);
}
ir_arguments.push_back(std::move(ir_argument));
}
std::vector<llvm_ir::IrArray> ir_results;
for (const KernelParameter& result : results) {
auto ir_result = EmitKernelArgument(b, call_frame, idx++, result.shape);
if (auto* noalias = get_noalias(result.slice)) {
ir_result.AddNoaliasMetadata(noalias);
}
if (auto* alias_scope = get_alias_scope(result.slice)) {
ir_result.AddAliasScopeMetadata(alias_scope);
}
ir_results.push_back(std::move(ir_result));
}
llvm::BasicBlock* return_block =
llvm::BasicBlock::Create(ctx, "return", function);
b.CreateBr(return_block);
b.SetInsertPoint(return_block);
b.CreateRet(
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(ctx)));
return KernelPrototype{function,
return_block,
kernel_thread_dims,
kernel_thread,
std::move(ir_arguments),
std::move(ir_results),
std::move(invariant_arguments)};
}
absl::StatusOr<IrEmitter2::KernelPrototype> IrEmitter2::EmitKernelPrototype(
const HloInstruction* instr) {
TF_ASSIGN_OR_RETURN(std::vector<KernelParameter> arguments,
GetKernelArgumentsParameters(instr));
TF_ASSIGN_OR_RETURN(std::vector<KernelParameter> results,
GetKernelResultsParameters(instr));
return EmitKernelPrototype(instr->name(), std::move(arguments),
std::move(results));
}
std::optional<IrEmitter2::ParallelConfig> IrEmitter2::GetParallelConfig(
const HloInstruction* instr) {
auto backend_config = instr->backend_config<BackendConfig>();
if (!backend_config.ok() ||
backend_config->outer_dimension_partitions().empty()) {
return std::nullopt;
}
ParallelConfig config;
config.outer_dimension_partitions.assign(
backend_config->outer_dimension_partitions().begin(),
backend_config->outer_dimension_partitions().end());
return config;
}
absl::Status IrEmitter2::CanDoFastConcatenate(
const HloInstruction* concatenate) const {
if (!concatenate->parent()
->root_instruction()
->template backend_config<BackendConfig>()
->outer_dimension_partitions()
.empty()) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"Cannot generate memcpy-based concat for the parallel CPU backend");
}
const Shape& output_shape = concatenate->shape();
for (auto* op : concatenate->operands()) {
if (!LayoutUtil::Equal(op->shape().layout(), output_shape.layout())) {
return absl::Status(absl::StatusCode::kFailedPrecondition,
"Operand has mismatching layouts");
}
}
return absl::OkStatus();
};
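// Materializes a private constant table of per-partition [lower, upper) bounds
// for every parallel dimension and returns loads of the row selected by the
// kernel's thread.x id.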
IrEmitter2::ParallelPartitionBounds IrEmitter2::EmitParallelPartitionBounds(
llvm::IRBuilder<>& b, const KernelPrototype& kernel_prototype,
const ParallelConfig& parallel_config, const Shape& shape,
std::string_view name) {
ShapePartitionIterator it(shape, parallel_config.outer_dimension_partitions);
size_t num_parallel_dimensions =
parallel_config.outer_dimension_partitions.size();
llvm::ArrayType* dim_bounds_ty = llvm::ArrayType::get(b.getInt64Ty(), 2);
llvm::ArrayType* partition_bounds_ty =
llvm::ArrayType::get(dim_bounds_ty, num_parallel_dimensions);
llvm::ArrayType* parallel_bounds_ty =
llvm::ArrayType::get(partition_bounds_ty, it.GetTotalPartitionCount());
std::vector<llvm::Constant*> partition_bounds;
for (int64_t i = 0; i < it.GetTotalPartitionCount(); ++i) {
std::vector<llvm::Constant*> dim_counts;
for (auto [lower, size] : it.GetPartition(i)) {
dim_counts.push_back(llvm::ConstantArray::get(
dim_bounds_ty, {b.getInt64(lower), b.getInt64(lower + size)}));
}
partition_bounds.push_back(
llvm::ConstantArray::get(partition_bounds_ty, dim_counts));
}
llvm::Constant* parallel_bounds =
llvm::ConstantArray::get(parallel_bounds_ty, partition_bounds);
llvm::Module* module = b.GetInsertBlock()->getParent()->getParent();
llvm::GlobalVariable* parallel_bounds_global = new llvm::GlobalVariable(
*module,
parallel_bounds_ty,
true,
llvm::GlobalValue::PrivateLinkage,
parallel_bounds,
absl::StrCat(name, "_parallel_bounds"));
ParallelPartitionBounds bounds;
for (size_t i = 0; i < num_parallel_dimensions; ++i) {
llvm::Value* partition = kernel_prototype.thread.x;
llvm::Value* parallel_dim = b.getInt32(i);
llvm::Value* lower_gep = b.CreateInBoundsGEP(
parallel_bounds_ty, parallel_bounds_global,
{b.getInt32(0), partition, parallel_dim, b.getInt32(0)},
absl::StrCat("lo_dim_", i, "_gep"));
llvm::Value* upper_gep = b.CreateInBoundsGEP(
parallel_bounds_ty, parallel_bounds_global,
{b.getInt32(0), partition, parallel_dim, b.getInt32(1)},
absl::StrCat("up_dim_", i, "_gep"));
bounds.emplace_back(
b.CreateLoad(b.getInt64Ty(), lower_gep, absl::StrCat("lo_dim_", i)),
b.CreateLoad(b.getInt64Ty(), upper_gep, absl::StrCat("up_dim_", i)));
}
return bounds;
}
absl::StatusOr<se::ThreadDim> IrEmitter2::EmitElementalLoops(
llvm::IRBuilder<>& b, const HloInstruction* instr,
const KernelPrototype& kernel_prototype,
const llvm_ir::ElementGenerator& element_generator) {
bool multiple_results = kernel_prototype.results.size() > 1;
bool support_multiple_results = instr->opcode() == HloOpcode::kFusion ||
instr->opcode() == HloOpcode::kReduce ||
instr->opcode() == HloOpcode::kReduceWindow;
auto parallel_config = GetParallelConfig(instr);
bool has_parallel_config = parallel_config.has_value();
if (multiple_results && !support_multiple_results) {
return Internal(
"Multi-output host kernels are not supported for %s instruction",
HloOpcodeString(instr->opcode()));
}
if (multiple_results) {
TF_RETURN_IF_ERROR(
llvm_ir::LoopEmitter(element_generator, kernel_prototype.results, &b)
.EmitLoop(llvm_ir::IrName(instr)));
return se::ThreadDim();
}
const llvm_ir::IrArray& result = kernel_prototype.results.front();
if (has_parallel_config) {
ParallelPartitionBounds parallel_bounds = EmitParallelPartitionBounds(
b, kernel_prototype, *parallel_config, instr->shape(), instr->name());
TF_RETURN_IF_ERROR(
ParallelLoopEmitter(element_generator, result, ¶llel_bounds, &b)
.EmitLoop(llvm_ir::IrName(instr)));
return se::ThreadDim(ShapePartitionAssigner::GetTotalPartitionCount(
parallel_config->outer_dimension_partitions));
}
TF_RETURN_IF_ERROR(llvm_ir::LoopEmitter(element_generator, result, &b)
.EmitLoop(llvm_ir::IrName(instr)));
return se::ThreadDim();
}
int64_t IrEmitter2::ByteSizeOf(const Shape& shape) const {
return llvm_ir::ByteSizeOf(shape, module_->getDataLayout());
}
void IrEmitter2::AttachInvariantLoadMetadataForLoad(
llvm::LoadInst* instr) const {
nested_ir_emitter_->AttachInvariantLoadMetadataForLoad(instr,
hlo_module_.config());
}
} | #include "xla/service/cpu/ir_emitter2.h"
#include <cstdint>
#include <memory>
#include <string_view>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape_util.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
class IrEmitter2Test : public HloTestBase {
public:
static auto EmitKernelPrototype(
IrEmitter2& ir_emitter,
const std::vector<IrEmitter2::KernelParameter>& arguments,
const std::vector<IrEmitter2::KernelParameter>& results) {
return ir_emitter.EmitKernelPrototype("test", arguments, results);
}
absl::StatusOr<IrEmitter2> MakeIrEmitter2(llvm::Module& module,
const HloModule& hlo) {
TF_ASSIGN_OR_RETURN(
buffer_assignment_,
BufferAssigner::Run(
&hlo, std::make_unique<DependencyHloOrdering>(&hlo),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; }));
target_machine_ =
std::make_unique<TargetMachineFeaturesWithFakeAlignmentLogic>(
[](int64_t size) { return 1; });
nested_ir_emitter_ = absl::WrapUnique(
new IrEmitter(nullptr, hlo, *buffer_assignment_, &module, {}, {}, {},
target_machine_.get(), false));
return IrEmitter2(hlo, &module, nested_ir_emitter_.get());
}
absl::StatusOr<IrEmitter2::KernelInfo> EmitElementalHostKernel(
IrEmitter2& ir_emitter, HloModule& hlo,
std::string_view instruction_name) {
HloInstruction* instruction = FindInstruction(&hlo, instruction_name);
if (instruction == nullptr) {
return absl::InternalError("Instruction not found");
}
TF_ASSIGN_OR_RETURN(IrEmitter2::KernelInfo kernel,
ir_emitter.EmitElementalHostKernel(instruction));
return kernel;
}
private:
std::unique_ptr<BufferAssignment> buffer_assignment_;
std::unique_ptr<TargetMachineFeaturesWithFakeAlignmentLogic> target_machine_;
std::unique_ptr<IrEmitter> nested_ir_emitter_;
};
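// The tests below do not execute the generated kernels; they dump the emitted
// LLVM IR and verify it with FileCheck, covering kernel prototypes, elemental
// kernels, parallel partition bounds, and invariant-argument tracking.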
namespace {
TEST_F(IrEmitter2Test, BuildKernelPrototype) {
auto hlo = std::make_unique<HloModule>("test", HloModuleConfig());
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
auto shape = ShapeUtil::MakeShape(PrimitiveType::F32, {4, 2});
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice arg0(&alloc, 0, 256);
BufferAllocation::Slice arg1(&alloc, 256, 256);
BufferAllocation::Slice res0(&alloc, 512, 256);
BufferAllocation::Slice res1(&alloc, 768, 256);
std::vector<IrEmitter2::KernelParameter> arguments = {{shape, arg0},
{shape, arg1}};
std::vector<IrEmitter2::KernelParameter> results = {{shape, res0},
{shape, res1}};
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto prototype,
EmitKernelPrototype(ir_emitter, arguments, results));
llvm::IRBuilder<> b(context);
b.SetInsertPoint(prototype.function->getEntryBlock().getTerminator());
auto* zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(context), 0);
llvm_ir::IrArray::Index index(zero, shape, &b);
EXPECT_NE(prototype.arguments[0].EmitReadArrayElement(index, &b), nullptr);
EXPECT_NE(prototype.arguments[1].EmitReadArrayElement(index, &b), nullptr);
EXPECT_NE(prototype.results[0].EmitReadArrayElement(index, &b), nullptr);
EXPECT_NE(prototype.results[1].EmitReadArrayElement(index, &b), nullptr);
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()),
absl::StrCat(R"(
CHECK: define ptr @test(ptr %0) #0 {
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 0
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThreadDim, {{.*}} i32 0
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThreadDim, {{.*}} i32 1
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThreadDim, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 1
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThread, {{.*}} i32 0
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThread, {{.*}} i32 1
CHECK: getelementptr inbounds nuw %SE_HOST_KernelThread, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 0, i32 0
CHECK: %[[ARG0:.+]] = load ptr, {{.*}}, !invariant.load ![[SCOPE0:.+]], !dereferenceable ![[DEREF_BYTES:.+]], !align ![[ALIGNMENT:.+]]
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 1, i32 0
CHECK: %[[ARG1:.+]] = load ptr, {{.*}}, !invariant.load ![[SCOPE0]], !dereferenceable ![[DEREF_BYTES]], !align ![[ALIGNMENT]]
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 2, i32 0
CHECK: %[[ARG2:.+]] = load ptr, {{.*}}, !invariant.load ![[SCOPE0]], !dereferenceable ![[DEREF_BYTES]], !align ![[ALIGNMENT]]
CHECK-NEXT: getelementptr inbounds nuw %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 3, i32 0
CHECK: %[[ARG3:.+]] = load ptr, {{.*}}, !invariant.load ![[SCOPE0]], !dereferenceable ![[DEREF_BYTES]], !align ![[ALIGNMENT]]
CHECK-NEXT: %[[PTR0:.+]] = getelementptr inbounds float, ptr %[[ARG0]]
CHECK: load float, ptr %[[PTR0]], align 4,
CHECK-SAME: !invariant.load ![[SCOPE0]],
CHECK-SAME: !noalias ![[SCOPE1:.+]]
CHECK-NEXT: %[[PTR1:.+]] = getelementptr inbounds float, ptr %[[ARG1]]
CHECK: load float, ptr %[[PTR1]], align 4,
CHECK-SAME: !invariant.load ![[SCOPE0]],
CHECK-SAME: !noalias ![[SCOPE1]]
CHECK-NEXT: %[[PTR2:.+]] = getelementptr inbounds float, ptr %[[ARG2]]
CHECK: load float, ptr %[[PTR2]], align 4, !alias.scope ![[SCOPE2:.+]],
CHECK: !noalias ![[SCOPE3:.+]]
CHECK-NEXT: %[[PTR3:.+]] = getelementptr inbounds float, ptr %[[ARG3]]
CHECK: load float, ptr %[[PTR3]], align 4, !alias.scope ![[SCOPE3]],
CHECK: !noalias ![[SCOPE2]]
CHECK: ret ptr null
CHECK: }
#0 = { uwtable "frame-pointer"="all" "prefer-vector-width"="256" }
CHECK-DAG: ![[ALIGNMENT]] = !{i64 )", cpu_function_runtime::MinAlign(), R"(}
CHECK-DAG: ![[SCOPE0]] = !{}
CHECK-DAG: ![[SCOPE1]] = !{![[RES0:.+]], ![[RES1:.+]]}
CHECK-DAG: ![[SCOPE2]] = !{![[RES0]]}
CHECK-DAG: ![[SCOPE3]] = !{![[RES1]]}
CHECK-DAG: ![[RES0]] = !{!"{{.*}}, offset:512, {{.*}}", ![[DOMAIN:.+]]}
CHECK-DAG: ![[RES1]] = !{!"{{.*}}, offset:768, {{.*}}", ![[DOMAIN]]}
CHECK-DAG: ![[DOMAIN]] = !{!"XLA host kernel test AA domain"}
)")));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: {{.+}} = load ptr, {{.*}}, !dereferenceable ![[DEREF_BYTES:.+]],
CHECK: ![[DEREF_BYTES]] = !{i64 32}
)"));
}
TEST_F(IrEmitter2Test, EmitElementalKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[2,2] parameter(0)
ROOT convert = s32[2,2] convert(p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "convert"));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
TEST_F(IrEmitter2Test, EmitParallelKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[1,2,1,16384,256] parameter(0)
ROOT convert = s32[1,2,1,16384,256] convert(p0),
backend_config={"outer_dimension_partitions":["1","2","1","4"]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "convert"));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: @convert_parallel_bounds = private constant [8 x [4 x [2 x i64]]]
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: %lo_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 0
CHECK: %up_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 1
CHECK: %lo_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 0
CHECK: %up_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 1
CHECK: %lo_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 0
CHECK: %up_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 1
CHECK: %lo_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 0
CHECK: %up_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 1
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
using IrEmitter2InvariantBuffersTest = IrEmitter2Test;
TEST_F(IrEmitter2InvariantBuffersTest, AllInvariantBuffers) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[2,2] parameter(1)
ROOT add.0 = f32[2,2] add(p0, p1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "add.0"));
ASSERT_EQ(kernel.invariant_arguments.size(), 2);
}
TEST_F(IrEmitter2InvariantBuffersTest, InvariantBufferPassedTwice) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[2,2] parameter(0)
ROOT add.0 = f32[2,2] add(p0, p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "add.0"));
ASSERT_EQ(kernel.invariant_arguments.size(), 2);
}
TEST_F(IrEmitter2InvariantBuffersTest, NoInvariantBuffers) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m, input_output_alias={ {}: (0, {}, must-alias) }
ENTRY main {
p0 = f32[2,2] parameter(0)
ROOT add.0 = f32[2,2] add(p0, p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "add.0"));
ASSERT_EQ(kernel.invariant_arguments.size(), 0);
}
TEST_F(IrEmitter2InvariantBuffersTest, MixedBuffers) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m, input_output_alias={ {}: (1, {}, must-alias) }
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[2,2] parameter(1)
ROOT add.0 = f32[2,2] add(p0, p1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2 ir_emitter, MakeIrEmitter2(*module, *hlo));
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
EmitElementalHostKernel(ir_emitter, *hlo, "add.0"));
EXPECT_EQ(kernel.invariant_arguments.size(), 1);
EXPECT_TRUE(kernel.invariant_arguments.contains(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75346150-76eb-46bf-af45-eced291a9d7e | cpp | tensorflow/tensorflow | cpu_layout_assignment | third_party/xla/xla/service/cpu/cpu_layout_assignment.cc | third_party/xla/xla/service/cpu/cpu_layout_assignment_test.cc | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <cstdint>
#include <numeric>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
namespace {
using std::nullopt;
using std::optional;
using ShouldMakeOperandColMajorCache =
absl::flat_hash_map<const HloInstruction*, bool>;
}
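// Returns true if every user of `instruction` is a dot that would profit from
// this operand being column-major and uses the instruction as exactly that one
// operand; only then is it safe and profitable to flip the constant's layout.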
static bool ShouldMakeAllUsersColMajor(const HloInstruction* instruction) {
for (auto* user : instruction->users()) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(*user);
if (!operand_idx || user->operand(*operand_idx) != instruction ||
absl::c_count(user->operands(), instruction) != 1) {
return false;
}
}
return true;
}
static optional<int64_t> ShouldMakeOperandColumnMajor(
ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(instruction);
if (!operand_idx) {
return nullopt;
}
const HloInstruction* operand = instruction.operand(*operand_idx);
if (operand->opcode() != HloOpcode::kConstant) {
return nullopt;
}
auto it = cache->find(operand);
if (it == cache->end()) {
auto insert_result =
cache->insert({operand, ShouldMakeAllUsersColMajor(operand)});
CHECK(insert_result.second);
it = insert_result.first;
}
return it->second ? operand_idx : nullopt;
}
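// RowMajorShape/ColMajorShape rewrite the layout of every array (sub)shape.
// As a concrete example, an f32[2,3,4] array ends up with minor-to-major
// {2,1,0} (row-major) or {0,1,2} (column-major) respectively.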
static Shape RowMajorShape(Shape shape) {
ShapeUtil::ForEachMutableSubshape(
&shape, [](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
std::vector<int64_t> dimension_order(subshape->dimensions_size());
std::iota(dimension_order.rbegin(), dimension_order.rend(), 0);
*subshape->mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
});
return shape;
}
static Shape ColMajorShape(const Shape& old_shape) {
Shape new_shape(old_shape);
std::vector<int64_t> dimension_order(new_shape.dimensions_size());
std::iota(dimension_order.begin(), dimension_order.end(), 0);
*new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
return new_shape;
}
static bool OperandsAndResultMustHaveRowMajorLayout(
const HloInstruction& instr,
const TargetMachineFeatures& target_machine_features) {
if (instr.opcode() == HloOpcode::kConvolution) {
return PotentiallyImplementedAsEigenConvolution(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kDot) {
return DotOperandsAndResultMustHaveRowMajorLayout(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kCustomCall) {
return instr.custom_call_target() == "TopK";
}
return false;
}
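// CPU-specific layout constraints: Eigen-lowered convolutions and dots (and
// the TopK custom call) require row-major operands and results, profitable
// constant dot operands are made column-major, reduce-scatter/all-gather keep
// their collective dimension major, and remaining array operands default to
// row-major.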
absl::Status CpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
ShouldMakeOperandColMajorCache cache;
const HloComputation* computation = constraints->computation();
for (auto* instruction : computation->instructions()) {
if (OperandsAndResultMustHaveRowMajorLayout(*instruction,
target_machine_features_)) {
TF_RETURN_IF_ERROR(SetInstructionLayout(
RowMajorShape(instruction->shape()), instruction));
for (int i = 0; i < instruction->operand_count(); i++) {
TF_RETURN_IF_ERROR(SetOperandLayout(
RowMajorShape(instruction->operand(i)->shape()), instruction, i));
}
} else if (optional<int64_t> op_idx =
ShouldMakeOperandColumnMajor(&cache, *instruction)) {
const HloInstruction* op = instruction->operand(*op_idx);
TF_RETURN_IF_ERROR(
SetOperandLayout(ColMajorShape(op->shape()), instruction, *op_idx));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else {
for (int64_t operand_no = 0; operand_no < instruction->operand_count();
++operand_no) {
if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
continue;
}
if (AnyOperandBufferForwarded(instruction, operand_no)) {
continue;
}
if (!instruction->operand(operand_no)->shape().IsArray()) {
continue;
}
Shape operand_shape(
RowMajorShape(instruction->operand(operand_no)->shape()));
TF_RETURN_IF_ERROR(
SetOperandLayout(operand_shape, instruction, operand_no));
}
if (computation->parent()->entry_computation() == computation &&
computation->root_instruction() == instruction) {
continue;
}
if (!instruction->shape().IsArray()) {
continue;
}
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class CpuLayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* module,
ComputationLayout* entry_computation_layout) {
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(entry_computation_layout,
&target_machine_features);
EXPECT_IS_OK(layout_assignment.Run(module).status());
}
};
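// Each test builds a small computation, runs CpuLayoutAssignment with a fake
// target-machine alignment function, then inspects the assigned layouts and
// asserts that no copy instructions were introduced.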
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
dot_lhs->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
builder.AddInstruction(HloInstruction::CreateBinary(
result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
for (HloInstruction* instruction :
{dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_a_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
Shape result_b_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
auto tuple_result = builder.AddInstruction(
HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction :
{dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
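// A dot whose LHS (rather than RHS) is a constant: all operands and the result
// should keep the default row-major layout with no copies.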
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "param0"));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
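// The constant RHS is produced by a get-tuple-element of a constant tuple; the
// expected layouts match the direct-constant case, again without copies.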
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape other_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
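// Captures the outcome of running layout assignment on a dot+add output
// fusion: whether the pass changed anything, and which fusion operands feed
// the dot LHS, the dot RHS, and the addend.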
struct DotOutputFusionLayoutAssignmentResult {
bool layout_assignment_changed_something;
const HloInstruction* dot_lhs_fusion_param;
const HloInstruction* dot_rhs_fusion_param;
const HloInstruction* addend_fusion_param;
};
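// Builds add(dot(param, constant), param) with the dot as operand
// `dot_operand_idx_in_add` of the add (degenerate dimensions are collapsed
// when m == 1 or n == 1), fuses the add and dot into an output fusion, runs
// CpuLayoutAssignment, and records the relevant fusion operands.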
static absl::StatusOr<DotOutputFusionLayoutAssignmentResult> RunDotOutputFusion(
HloModule* module, const std::string& test_name, int m, int k, int n,
const int64_t dot_operand_idx_in_add) {
DotOutputFusionLayoutAssignmentResult result;
CHECK(dot_operand_idx_in_add == 0 || dot_operand_idx_in_add == 1);
auto builder = HloComputation::Builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
HloInstruction* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
HloInstruction* addend = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_shape, "param1"));
HloInstruction* dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(dot_rhs_shape)));
HloInstruction* dot_result =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
HloInstruction* add_result;
if (dot_operand_idx_in_add == 0) {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, dot_result, addend));
} else {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, addend, dot_result));
}
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion_instruction =
module->entry_computation()->AddInstruction(HloInstruction::CreateFusion(
dot_shape, HloInstruction::FusionKind::kOutput, add_result));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(add_result, fusion_instruction));
HloInstruction* fused_add =
fusion_instruction->fused_instructions_computation()->root_instruction();
HloInstruction* fused_dot = fusion_instruction->FuseInstruction(dot_result);
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(dot_result));
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_lhs_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
result.dot_lhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(0)->parameter_number());
result.dot_rhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(1)->parameter_number());
result.addend_fusion_param = fusion_instruction->operand(
fused_add->operand(1 - dot_operand_idx_in_add)->parameter_number());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(&computation_layout,
&target_machine_features);
TF_ASSIGN_OR_RETURN(result.layout_assignment_changed_something,
layout_assignment.Run(module));
return result;
}
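// Verifies the layouts chosen for the fusion operands: the dot RHS may be
// column major (unless it is rank 1), the dot LHS and the addend must stay in
// the default descending layout, and no copies may appear in the computation.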
static void AssertCorrectLayoutForDotOutputFusion(
const HloComputation* computation,
const DotOutputFusionLayoutAssignmentResult& layout_assignment_result,
bool expect_col_major_dot_rhs) {
Layout expected_dot_rhs_layout = expect_col_major_dot_rhs
? LayoutUtil::MakeLayout({0, 1})
: LayoutUtil::MakeLayout({1, 0});
if (layout_assignment_result.dot_rhs_fusion_param->shape().rank() == 1) {
expected_dot_rhs_layout = LayoutUtil::MakeLayout({0});
}
EXPECT_TRUE(LayoutUtil::Equal(
expected_dot_rhs_layout,
layout_assignment_result.dot_rhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.dot_lhs_fusion_param->shape().rank()),
layout_assignment_result.dot_lhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.addend_fusion_param->shape().rank()),
layout_assignment_result.addend_fusion_param->shape().layout()));
EXPECT_THAT(computation->instructions(), Each(Not(op::Copy())));
}
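// The test names below encode the dot shape as m x k x n plus which add
// operand the dot feeds (dot_idx); only the m == 1 cases expect a column-major
// RHS.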
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/0));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/1));
  ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
  AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
                                        layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
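// Batch dots must produce a row-major result: even though the entry layout
// requests {1,2,0}, the dot itself should emit {2,1,0} and a copy should
// convert to the requested layout.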
TEST_F(CpuLayoutAssignmentTest, BatchDotLayoutMustBeRowMajor) {
const char* hlo_string = R"(
HloModule BatchDotLayoutMustBeRowMajor
ENTRY BatchDotLayoutMustBeRowMajor {
p0 = f32[10,1,10] parameter(0)
p1 = f32[10,10,1] parameter(1)
ROOT dot = f32[10,1,1] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 10}, {2, 1, 0}));
*computation_layout.mutable_parameter_layout(1) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 1}, {2, 1, 0}));
*computation_layout.mutable_result_layout() = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {1, 2, 0}));
AssignLayouts(module.get(), &computation_layout);
Shape expected_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {2, 1, 0});
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::ShapeWithLayout(expected_shape)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Dot(
op::ShapeWithLayout(computation_layout.parameter_layout(0).shape()),
op::ShapeWithLayout(
computation_layout.parameter_layout(1).shape()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_layout_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_layout_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
adef24a5-72cd-40be-a7ea-abcef4d0cf40 | cpp | tensorflow/tensorflow | xfeed_manager | third_party/xla/xla/service/cpu/xfeed_manager.cc | third_party/xla/xla/service/cpu/xfeed_manager_test.cc | #include "xla/service/cpu/xfeed_manager.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace runtime {
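// Appends a group of buffers under one lock acquisition and wakes a waiter if
// the queue was previously empty.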
void XfeedQueueManager::EnqueueBuffersAtomically(
absl::Span<XfeedBuffer* const> buffers) {
absl::MutexLock l(&mu_);
bool was_empty = enqueued_buffers_.empty();
for (XfeedBuffer* b : buffers) {
VLOG(3) << "Enqueueing " << queue_name_ << " buffer (of " << buffers.size()
<< " buffers) with length: " << b->length();
enqueued_buffers_.push_back(b);
}
if (was_empty && !buffers.empty()) {
cv_.Signal();
}
}
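// Blocks until a buffer is available, records it as the current buffer, and
// returns it.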
XfeedBuffer* XfeedQueueManager::BlockingDequeueBuffer() {
absl::MutexLock l(&mu_);
VLOG(3) << "Waiting for an available buffer.";
while (enqueued_buffers_.empty()) {
cv_.Wait(&mu_);
}
VLOG(3) << "A buffer is available!";
CHECK(current_buffer_ == nullptr);
current_buffer_ = enqueued_buffers_.front();
enqueued_buffers_.pop_front();
return current_buffer_;
}
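// Completes the current buffer; the caller must pass back the dequeued length
// and data pointer along with the shape (or error) to report through Done().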
void XfeedQueueManager::ReleaseCurrentBuffer(int32_t length, void* data,
absl::StatusOr<Shape> shape) {
VLOG(3) << "Releasing buffer with shape: "
<< (shape.ok() ? ShapeUtil::HumanString(shape.value())
: "<error status>");
absl::MutexLock l(&mu_);
CHECK(current_buffer_ != nullptr);
CHECK_EQ(length, current_buffer_->length());
CHECK_EQ(data, current_buffer_->data());
current_buffer_->Done(std::move(shape));
current_buffer_ = nullptr;
}
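// Dynamic shapes carry one int32 of bound metadata per dimension in addition
// to the payload, so account for it in the byte-size requirement.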
int64_t GetByteSizeRequirement(const Shape& shape, int64_t pointer_size) {
if (shape.IsTuple() || shape.is_static()) {
return ShapeUtil::ByteSizeOf(shape, pointer_size);
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return ShapeUtil::ByteSizeOf(shape, pointer_size) + metadata_size;
}
}
}
} | #include "xla/service/cpu/xfeed_manager.h"
#include <memory>
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
class InfeedManagerTest : public ::testing::Test {};
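// A fake XfeedBuffer with no backing data: Done() must be called exactly once,
// the reported shape is checked against the expectation, and the buffer then
// deletes itself.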
class TestInfeedBuffer : public cpu::runtime::XfeedBuffer {
public:
explicit TestInfeedBuffer(int32_t length, bool expect_shape_match = true)
: shape_(ShapeUtil::MakeShape(U8, {length})),
done_called_(false),
length_(length),
expect_shape_match_(expect_shape_match) {}
~TestInfeedBuffer() override { EXPECT_TRUE(done_called_); }
int32_t length() override { return length_; }
void* data() override { return nullptr; }
void Done(absl::StatusOr<Shape> shape) override {
CHECK(!done_called_);
done_called_ = true;
TF_ASSERT_OK(shape.status());
EXPECT_EQ(expect_shape_match_, ShapeUtil::Equal(shape_, shape.value()))
<< "want " << ShapeUtil::HumanString(shape_) << " "
<< (expect_shape_match_ ? "==" : "!=") << " "
<< ShapeUtil::HumanString(shape.value());
delete this;
}
const Shape& shape() const { return shape_; }
private:
Shape shape_;
bool done_called_;
int32_t length_;
bool expect_shape_match_;
};
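// Exercises the runtime the way compiled code would: acquire the next infeed
// buffer for a u8[length] shape and immediately release it with that shape.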
void ProcessNextBuffer(int32_t length) {
auto shape = ShapeUtil::MakeShape(U8, {length});
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
nullptr, length, buffer, bytes.data(), bytes.size());
}
void ProcessNextOutfeedBuffer(int32_t length, const Shape& shape) {
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
nullptr, length, buffer, bytes.data(), bytes.size());
}
TEST_F(InfeedManagerTest, SingleThreadedSequential) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(a->length());
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, SingleThreadedInterleaved) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
ProcessNextBuffer(a->length());
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, MultiThreaded) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "test", 2);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
const int32_t length = 64;
pool.Schedule([length, &xfeed]() {
int64_t start_micros = tsl::Env::Default()->NowMicros();
while (true) {
int64_t end_micros = tsl::Env::Default()->NowMicros();
if ((end_micros - start_micros) >= 100000) {
break;
}
}
TestInfeedBuffer* a = new TestInfeedBuffer(length);
xfeed->infeed()->EnqueueBuffersAtomically({a});
});
ProcessNextBuffer(length);
}
TEST_F(InfeedManagerTest, OutfeedBasic) {
  TestInfeedBuffer* b = new TestInfeedBuffer(32, /*expect_shape_match=*/true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {32}));
}
TEST_F(InfeedManagerTest, OutfeedEmpty) {
  TestInfeedBuffer* b = new TestInfeedBuffer(0, /*expect_shape_match=*/true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(0, ShapeUtil::MakeShape(U8, {0}));
}
TEST_F(InfeedManagerTest, OutfeedWrongShape) {
  TestInfeedBuffer* b = new TestInfeedBuffer(32, /*expect_shape_match=*/false);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {33}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/xfeed_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/xfeed_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f0424d3-3804-43b2-8e39-74cec9ac5e61 | cpp | tensorflow/tensorflow | conv_canonicalization | third_party/xla/xla/service/cpu/conv_canonicalization.cc | third_party/xla/xla/service/cpu/conv_canonicalization_test.cc | #include "xla/service/cpu/conv_canonicalization.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
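// Rewrites convolutions that are not in a form Eigen can execute directly into
// the canonical batch-major/feature-minor form: transpose the input to
// [batch, spatial..., feature], the kernel to [spatial..., input_feature,
// output_feature], convolve, and transpose the result back.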
absl::StatusOr<bool> ConvCanonicalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloInstruction* hlo :
module->entry_computation()->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*hlo,
target_machine_features_)) {
const ConvolutionDimensionNumbers& dnums =
hlo->convolution_dimension_numbers();
auto input_batch_dim = dnums.input_batch_dimension();
auto input_feature_dim = dnums.input_feature_dimension();
auto kernel_input_feature_dim = dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = dnums.kernel_output_feature_dimension();
const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();
const int64_t num_dims = num_spatial_dims + 2;
HloInstruction* input = hlo->mutable_operand(0);
std::vector<int64_t> new_input_dim_order(num_dims);
std::vector<int64_t> new_input_dims(num_dims);
new_input_dim_order[0] = input_batch_dim;
new_input_dims[0] = input->shape().dimensions(input_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_input_dim_order[i + 1] = dnums.input_spatial_dimensions(i);
new_input_dims[i + 1] =
input->shape().dimensions(dnums.input_spatial_dimensions(i));
}
new_input_dim_order[num_dims - 1] = input_feature_dim;
new_input_dims[num_dims - 1] =
input->shape().dimensions(input_feature_dim);
Shape new_input_shape =
ShapeUtil::MakeShape(input->shape().element_type(), new_input_dims);
HloInstruction* new_input = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_input_shape, input,
new_input_dim_order));
HloInstruction* kernel = hlo->mutable_operand(1);
std::vector<int64_t> new_kernel_dim_order(num_dims);
std::vector<int64_t> new_kernel_dims(num_dims);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_kernel_dim_order[i] = dnums.kernel_spatial_dimensions(i);
new_kernel_dims[i] =
kernel->shape().dimensions(dnums.kernel_spatial_dimensions(i));
}
new_kernel_dim_order[num_dims - 2] = kernel_input_feature_dim;
new_kernel_dims[num_dims - 2] =
kernel->shape().dimensions(kernel_input_feature_dim);
new_kernel_dim_order[num_dims - 1] = kernel_output_feature_dim;
new_kernel_dims[num_dims - 1] =
kernel->shape().dimensions(kernel_output_feature_dim);
Shape new_kernel_shape =
ShapeUtil::MakeShape(kernel->shape().element_type(), new_kernel_dims);
HloInstruction* new_kernel = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_kernel_shape, kernel,
new_kernel_dim_order));
std::vector<int64_t> new_output_dim_order(num_dims);
std::vector<int64_t> new_conv_dims(num_dims);
auto output_batch_dim = dnums.output_batch_dimension();
auto output_feature_dim = dnums.output_feature_dimension();
new_output_dim_order[0] = output_batch_dim;
new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_output_dim_order[i + 1] = dnums.output_spatial_dimensions(i);
new_conv_dims[i + 1] =
hlo->shape().dimensions(dnums.output_spatial_dimensions(i));
}
new_output_dim_order[num_dims - 1] = output_feature_dim;
new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);
Shape new_conv_shape =
ShapeUtil::MakeShape(hlo->shape().element_type(), new_conv_dims);
ConvolutionDimensionNumbers new_dnums;
new_dnums.set_input_batch_dimension(0);
new_dnums.set_output_batch_dimension(0);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_dnums.add_input_spatial_dimensions(i + 1);
new_dnums.add_kernel_spatial_dimensions(i);
new_dnums.add_output_spatial_dimensions(i + 1);
}
new_dnums.set_input_feature_dimension(num_dims - 1);
new_dnums.set_output_feature_dimension(num_dims - 1);
new_dnums.set_kernel_input_feature_dimension(num_dims - 2);
new_dnums.set_kernel_output_feature_dimension(num_dims - 1);
HloInstruction* new_conv = module->entry_computation()->AddInstruction(
HloInstruction::CreateConvolve(
new_conv_shape, new_input, new_kernel, hlo->feature_group_count(),
hlo->batch_group_count(), hlo->window(), new_dnums,
hlo->precision_config()));
TF_RETURN_IF_ERROR(module->entry_computation()->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(
hlo->shape(), new_conv,
InversePermutation(new_output_dim_order))));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/cpu/conv_canonicalization.h"
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
using ::testing::ElementsAre;
class ConvCanonicalizationTest : public HloTestBase {
public:
ConvCanonicalizationTest() {
for (int i = 0; i < 2; ++i) {
auto dim = conv_window_.add_dimensions();
dim->set_size(kWindowSize);
dim->set_stride(1);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
}
protected:
Window conv_window_;
static constexpr int kBatchSize = 50;
static constexpr int kInputSize = 28;
static constexpr int kWindowSize = 5;
static constexpr int kInputFeatureCount = 32;
static constexpr int kOutputFeatureCount = 64;
};
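// The input is feature-major ([feature, batch, spatial, spatial]) and the
// kernel is [output_feature, input_feature, spatial, spatial], so the pass
// must wrap the convolution in transposes; the checks below verify the exact
// permutations.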
TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(1);
dnums.set_output_batch_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_kernel_spatial_dimensions(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_output_feature_dimension(0);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kOutputFeatureCount, kBatchSize, output_size, output_size}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_TRUE(conv_canonicalization.Run(module.get()).value());
const HloInstruction* output_reshape = entry_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTranspose, output_reshape->opcode());
const HloInstruction* canonical_conv = output_reshape->operand(0);
EXPECT_EQ(HloOpcode::kConvolution, canonical_conv->opcode());
const HloInstruction* input_reshape = canonical_conv->operand(0);
EXPECT_EQ(HloOpcode::kTranspose, input_reshape->opcode());
const HloInstruction* kernel_reshape = canonical_conv->operand(1);
EXPECT_EQ(HloOpcode::kTranspose, kernel_reshape->opcode());
EXPECT_THAT(input_reshape->dimensions(), ElementsAre(1, 2, 3, 0));
EXPECT_THAT(kernel_reshape->dimensions(), ElementsAre(2, 3, 1, 0));
EXPECT_THAT(output_reshape->dimensions(), ElementsAre(3, 0, 1, 2));
}
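// Input already NHWC and kernel already HWIO: the pass should report no
// change.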
TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kBatchSize, kInputSize, kInputSize, kInputFeatureCount))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kBatchSize, output_size, output_size, kOutputFeatureCount}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_FALSE(conv_canonicalization.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/conv_canonicalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/conv_canonicalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
145ec2e0-d3a0-4ee7-9f72-6f42a78ef99f | cpp | tensorflow/tensorflow | onednn_softmax | third_party/xla/xla/service/cpu/onednn_softmax.cc | third_party/xla/xla/service/cpu/tests/onednn_softmax_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_softmax.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>
#include "absl/base/dynamic_annotations.h"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
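// Runtime entry point for the __onednn$softmax custom call: parses the
// serialized OneDnnSoftmaxConfig, wraps the input and result memrefs as oneDNN
// memory, and runs an accurate softmax forward primitive over the configured
// axis.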
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnSoftmax(
const void* run_options_ptr, void* input, void* result,
void* softmax_config_ptr) {
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream = dnnl::stream(
dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = dnnl::stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(softmax_config_ptr));
OneDnnSoftmaxConfig softmax_config;
softmax_config.ParseFromString(config_str);
MemrefInfo input_minfo(input);
MemrefInfo result_minfo(result);
auto src_md = input_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto src_mem = dnnl::memory(src_md, cpu_engine, input_minfo.Data());
auto dst_mem = dnnl::memory(dst_md, cpu_engine, result_minfo.Data());
int axis = softmax_config.softmax_axis();
auto softmax_pd = dnnl::softmax_forward::primitive_desc(
cpu_engine, dnnl::prop_kind::forward_inference,
dnnl::algorithm::softmax_accurate, src_md, dst_md, axis);
auto softmax_prim = dnnl::softmax_forward(softmax_pd);
std::unordered_map<int, dnnl::memory> softmax_args;
softmax_args.insert({DNNL_ARG_SRC, src_mem});
softmax_args.insert({DNNL_ARG_DST, dst_mem});
softmax_prim.execute(onednn_stream, softmax_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "xla/literal.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_ops_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace cpu {
std::string TestParamsToString(
const ::testing::TestParamInfo<std::tuple<PrimitiveType, int>>& data) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = data.param;
return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type),
"_BatchSize", std::to_string(batch_size));
}
class OneDnnSoftmaxTest
: public HloTestBase,
public ::testing::WithParamInterface<std::tuple<PrimitiveType, int>> {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
const char* onednn_softmax_ =
R"(
; CHECK: custom_call_target="__onednn$softmax"
)";
const std::string GetGenericSoftmaxHLORawText(PrimitiveType data_type,
int batch_size) {
const std::string softmax_hlo_template_string = R"(
HloModule softmax_module
region_max {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
neg_inf = $0[] constant(-inf)
reduce_max = $0[$1,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = $0[$1,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = $0[$1,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = $0[$1,128]{1,0} reshape(broadcast.0)
broadcast.1 = $0[$1,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract.0 = $0[$1,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = $0[$1,128,30522]{2,1,0} exponential(subtract.0)
const_zero = $0[] constant(0)
reduce_add = $0[$1,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = $0[$1,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = $0[$1,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = $0[$1,128]{1,0} reshape(broadcast.2)
broadcast.3 = $0[$1,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
ROOT divide = $0[$1,128,30522]{2,1,0} divide(exponential, broadcast.3)
}
)";
const std::string softmax_hlo_string = absl::Substitute(
softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type), batch_size);
return softmax_hlo_string;
}
void TestSoftmaxPatternMatching(std::string input_hlo_string,
int expected_softmax_axis) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(input_hlo_string));
OneDnnOpsRewriter softmax_rewrite_pass;
HloInstruction* onednn_softmax;
OneDnnSoftmaxConfig softmax_config;
TF_ASSERT_OK_AND_ASSIGN(
bool changed, this->RunHloPass(&softmax_rewrite_pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(::xla::match::CustomCall(&onednn_softmax,
{"__onednn$softmax"})));
auto backend_config = onednn_softmax->backend_config<BackendConfig>();
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
int axis_after_rewrite = softmax_config.softmax_axis();
EXPECT_EQ(expected_softmax_axis, axis_after_rewrite);
}
};
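// Pattern-matching test: the generic softmax HLO should be rewritten into a
// single __onednn$softmax custom call with softmax_axis == 2.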
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string softmax_hlo_string =
GetGenericSoftmaxHLORawText(data_type, batch_size);
TestSoftmaxPatternMatching(softmax_hlo_string, 2);
}
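// Numerical test: a module calling __onednn$softmax directly is compared
// against the unfused reference HLO within a type-dependent tolerance.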
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericNumericalCorrectnessTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string onednn_softmax_hlo_template_string = R"(
HloModule softmax_module
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
ROOT custom-call = $0[$1,128,30522]{2,1,0} custom-call(Arg_0), custom_call_target="$2", backend_config={"onednn_softmax_config":{"softmax_axis":2}}
}
)";
auto onednn_softmax_hlo_string =
absl::Substitute(onednn_softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type),
batch_size, "__onednn$softmax");
const std::string hlo_string_ref =
GetGenericSoftmaxHLORawText(data_type, batch_size);
float atol = (data_type == F32) ? 1e-4 : 1e-2;
float rtol = (data_type == F32) ? 1e-4 : 1e-2;
EXPECT_TRUE(RunAndCompareTwoModules(onednn_softmax_hlo_string, hlo_string_ref,
ErrorSpec{atol, rtol},
                                      /*run_hlo_passes=*/false));
}
INSTANTIATE_TEST_SUITE_P(OneDnnSoftmaxTestSuite, OneDnnSoftmaxTest,
::testing::Combine(::testing::ValuesIn({F32, BF16,
F16}),
::testing::Values(1, 16)),
TestParamsToString);
TEST_F(OneDnnSoftmaxTest, SoftmaxFP32OnAxisZero) {
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[3,1,1]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[1,1]{1,0} reduce(Arg_0, neg_inf), dimensions={0}, to_apply=region_max
neg_inf.1 = f32[1,1]{1,0} constant({ {-inf} })
maximum = f32[1,1]{1,0} maximum(reduce_max, neg_inf.1)
reshape.0 = f32[1,1,1]{2,1,0} reshape(maximum)
broadcast.0 = f32[1,1,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[1,1]{1,0} reshape(broadcast.0)
broadcast.1 = f32[3,1,1]{2,1,0} broadcast(reshape.1), dimensions={1,2}
subtract = f32[3,1,1]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[3,1,1]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[1,1]{1,0} reduce(exponential, const_zero), dimensions={0}, to_apply=region_add
reshape.2 = f32[1,1,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[1,1,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[1,1]{1,0} reshape(broadcast.2)
broadcast.3 = f32[3,1,1]{2,1,0} broadcast(reshape.3), dimensions={1,2}
ROOT divide = f32[3,1,1]{2,1,0} divide(exponential, broadcast.3)
}
)";
TestSoftmaxPatternMatching(softmax_hlo_string, 0);
}
TEST_F(OneDnnSoftmaxTest, SoftmaxWithBF16ConvertOutputFP32Pattern) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[16,128,30522]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[16,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = f32[16,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = f32[16,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[16,128]{1,0} reshape(broadcast.0)
broadcast.1 = f32[16,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract = f32[16,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[16,128,30522]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[16,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = f32[16,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[16,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[16,128]{1,0} reshape(broadcast.2)
broadcast.3 = f32[16,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
divide = f32[16,128,30522]{2,1,0} divide(exponential, broadcast.3)
ROOT convert = bf16[16,128,30522]{2,1,0} convert(divide)
}
)";
TestSoftmaxPatternMatching(softmax_hlo_string, 2);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a8637437-dc05-47df-af5a-2291897bf2a2 | cpp | tensorflow/tensorflow | onednn_layer_norm | third_party/xla/xla/service/cpu/onednn_layer_norm.cc | third_party/xla/xla/service/cpu/tests/onednn_layer_norm_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_layer_norm.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>
#define EIGEN_USE_THREADS
#include "absl/base/dynamic_annotations.h"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::layer_normalization_forward;
using dnnl::memory;
using dnnl::normalization_flags;
using dnnl::prop_kind;
using dnnl::stream;
}
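// Runtime entry point for the __onednn$layernorm custom call: args carry the
// run options, the serialized OneDnnNormConfig, and the input/scale/shift
// memrefs; epsilon arrives as the raw bit pattern of a float.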
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnLayerNorm(
void* result, void** args) {
int arg_indx = 1;
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
engine cpu_engine(engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnNormConfig ln_config;
ln_config.ParseFromString(config_str);
MemrefInfo layer_minfo(args[arg_indx++]);
MemrefInfo gamma_minfo(args[arg_indx++]);
MemrefInfo beta_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto src_md = layer_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto scaleshift_md = beta_minfo.GetOneDnnMemDesc();
auto src_mem = memory(src_md, cpu_engine, layer_minfo.Data());
auto dst_mem = memory(dst_md, cpu_engine, result_minfo.Data());
auto scale_mem = memory(scaleshift_md, cpu_engine, gamma_minfo.Data());
auto shift_mem = memory(scaleshift_md, cpu_engine, beta_minfo.Data());
float epsilon;
*(reinterpret_cast<int32_t*>(&epsilon)) = ln_config.epsilon_typecast();
auto lnorm_pd = layer_normalization_forward::primitive_desc(
cpu_engine, prop_kind::forward_inference, src_md, dst_md, epsilon,
normalization_flags::use_scale | normalization_flags::use_shift);
auto lnorm_prim = layer_normalization_forward(lnorm_pd);
std::unordered_map<int, memory> ln_args;
ln_args.insert({DNNL_ARG_SRC, src_mem});
ln_args.insert({DNNL_ARG_SCALE, scale_mem});
ln_args.insert({DNNL_ARG_SHIFT, shift_mem});
ln_args.insert({DNNL_ARG_DST, dst_mem});
lnorm_prim.execute(onednn_stream, ln_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class LayerNormTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
const char* onednn_layer_norm_ =
R"(
; CHECK: custom_call_target="__onednn$layernorm",
; CHECK: backend_config={
; CHECK-DAG: "onednn_layer_norm_config":{
; CHECK-DAG: "rescale":"SCALE_AND_SHIFT"
; CHECK-DAG: }
; CHECK: }
)";
std::string common_hlo_region_ =
R"(
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
)";
std::string common_hlo_entry_computation_block_ =
R"(
Arg_0.2 = f32[768]{0} parameter(1), sharding={replicated}
Arg_0.3 = f32[768]{0} parameter(2), sharding={replicated}
convert.290 = f32[84,197,768]{2,1,0} convert(Arg_0.1)
constant.291 = f32[] constant(0)
convert.292 = f32[] convert(constant.291)
reduce.297 = f32[84,197]{1,0} reduce(convert.290, convert.292), dimensions={2}, to_apply=region_add
constant.298 = s32[] constant(768)
convert.299 = f32[] convert(constant.298)
broadcast.300 = f32[84,197]{1,0} broadcast(convert.299), dimensions={}
divide.301 = f32[84,197]{1,0} divide(reduce.297, broadcast.300)
convert.302 = f32[84,197]{1,0} convert(divide.301)
reshape.303 = f32[84,197,1]{2,1,0} reshape(convert.302)
reshape.304 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.305 = f32[84,197,768]{2,1,0} broadcast(reshape.304), dimensions={0,1}
subtract.306 = f32[84,197,768]{2,1,0} subtract(Arg_0.1, broadcast.305)
multiply.307 = f32[84,197,768]{2,1,0} multiply(subtract.306, subtract.306)
convert.308 = f32[84,197,768]{2,1,0} convert(multiply.307)
constant.309 = f32[] constant(0)
convert.310 = f32[] convert(constant.309)
reduce.315 = f32[84,197]{1,0} reduce(convert.308, convert.310), dimensions={2}, to_apply=region_add
constant.316 = s32[] constant(768)
convert.317 = f32[] convert(constant.316)
broadcast.318 = f32[84,197]{1,0} broadcast(convert.317), dimensions={}
divide.319 = f32[84,197]{1,0} divide(reduce.315, broadcast.318)
convert.320 = f32[84,197]{1,0} convert(divide.319)
reshape.321 = f32[84,197,1]{2,1,0} reshape(convert.320)
constant.322 = f32[] constant(1e-12)
broadcast.323 = f32[84,197,1]{2,1,0} broadcast(constant.322), dimensions={}
add.324 = f32[84,197,1]{2,1,0} add(reshape.321, broadcast.323)
rsqrt.325 = f32[84,197,1]{2,1,0} rsqrt(add.324)
reshape.328 = f32[84,197]{1,0} reshape(rsqrt.325)
broadcast.329 = f32[84,197,768]{2,1,0} broadcast(reshape.328), dimensions={0,1}
broadcast.327 = f32[84,197,768]{2,1,0} broadcast(Arg_0.2), dimensions={2}
multiply.330 = f32[84,197,768]{2,1,0} multiply(broadcast.329, broadcast.327)
multiply.331 = f32[84,197,768]{2,1,0} multiply(Arg_0.1, multiply.330)
broadcast.336 = f32[84,197,768]{2,1,0} broadcast(Arg_0.3), dimensions={2}
reshape.332 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.333 = f32[84,197,768]{2,1,0} broadcast(reshape.332), dimensions={0,1}
multiply.334 = f32[84,197,768]{2,1,0} multiply(multiply.330, broadcast.333)
subtract.337 = f32[84,197,768]{2,1,0} subtract(broadcast.336, multiply.334)
)";
};
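// Each test below builds layer norm (mean/variance normalization with scale
// and shift) out of elementary HLO ops and checks both numerical agreement and
// that the optimized module contains a single __onednn$layernorm custom call.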
TEST_F(LayerNormTest, LayerNormTest0_FP32) {
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f32[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f32[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1 = f32[84,197,768]{2,1,0} parameter(0), sharding={replicated}
)" + common_hlo_entry_computation_block_ +
R"(
ROOT add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(bf16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->bf16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = bf16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = bf16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = f16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = f16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(Arg_2, broadcast_1)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_2 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_2, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(Arg_2, multiply_1)
constant_5 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest2_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(broadcast_1, Arg_2)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_1 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_1, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(multiply_1, Arg_2)
constant_5 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
ENTRY main {
Arg_0.1 = bf16[160,197,768] parameter(0), sharding={replicated}
Arg_0.2 = bf16[768] parameter(1), sharding={replicated}
Arg_0.3 = bf16[768] parameter(2), sharding={replicated}
convert.80 = f32[160,197,768] convert(Arg_0.1)
constant.81 = f32[] constant(0)
convert.82 = f32[] convert(constant.81)
reduce.87 = f32[160,197] reduce(convert.80, convert.82), dimensions={2}, to_apply=region_add
constant.88 = s32[] constant(768)
convert.89 = f32[] convert(constant.88)
broadcast.90 = f32[160,197] broadcast(convert.89), dimensions={}
divide.91 = f32[160,197] divide(reduce.87, broadcast.90)
convert.92 = bf16[160,197] convert(divide.91)
reshape.93 = bf16[160,197,1] reshape(convert.92)
reshape.94 = bf16[160,197] reshape(reshape.93)
broadcast.95 = bf16[160,197,768] broadcast(reshape.94), dimensions={0,1}
subtract.96 = bf16[160,197,768] subtract(Arg_0.1, broadcast.95)
multiply.97 = bf16[160,197,768] multiply(subtract.96, subtract.96)
convert.98 = f32[160,197,768] convert(multiply.97)
constant.99 = f32[] constant(0)
convert.100 = f32[] convert(constant.99)
reduce.105 = f32[160,197] reduce(convert.98, convert.100), dimensions={2}, to_apply=region_add
constant.106 = s32[] constant(768)
convert.107 = f32[] convert(constant.106)
broadcast.108 = f32[160,197] broadcast(convert.107), dimensions={}
divide.109 = f32[160,197] divide(reduce.105, broadcast.108)
convert.110 = bf16[160,197] convert(divide.109)
reshape.111 = bf16[160,197,1] reshape(convert.110)
constant.112 = bf16[] constant(1.002e-12)
broadcast.113 = bf16[160,197,1] broadcast(constant.112), dimensions={}
add.114 = bf16[160,197,1] add(reshape.111, broadcast.113)
rsqrt.115 = bf16[160,197,1] rsqrt(add.114)
reshape.118 = bf16[160,197] reshape(rsqrt.115)
broadcast.119 = bf16[160,197,768] broadcast(reshape.118), dimensions={0,1}
broadcast.117 = bf16[160,197,768] broadcast(Arg_0.2), dimensions={2}
multiply.120 = bf16[160,197,768] multiply(broadcast.119, broadcast.117)
multiply.121 = bf16[160,197,768] multiply(Arg_0.1, multiply.120)
broadcast.126 = bf16[160,197,768] broadcast(Arg_0.3), dimensions={2}
reshape.122 = bf16[160,197] reshape(reshape.93)
broadcast.123 = bf16[160,197,768] broadcast(reshape.122), dimensions={0,1}
multiply.124 = bf16[160,197,768] multiply(multiply.120, broadcast.123)
subtract.127 = bf16[160,197,768] subtract(broadcast.126, multiply.124)
ROOT add.128 = bf16[160,197,768] add(multiply.121, subtract.127)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_layer_norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_layer_norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
16a28cdd-9ae5-45f7-b5be-371ad5429024 | cpp | tensorflow/tensorflow | runtime_topk | third_party/xla/xla/service/cpu/runtime_topk.cc | third_party/xla/xla/tests/runtime_topk_test.cc | #include "xla/service/cpu/runtime_topk.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include "absl/base/casts.h"
#include "absl/base/dynamic_annotations.h"
template <typename T>
static void TopK(int64_t batch_size, int64_t input_size, int64_t k,
const T* values, T* out_values, int32_t* out_indices) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(values,
input_size * batch_size * sizeof(T));
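// Map each float's bit pattern to an int32 key whose signed ordering matches the float ordering, so the comparator below can compare plain integers.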
static constexpr auto convert_to_int = [](T value) {
uint32_t x = absl::bit_cast<uint32_t>(value);
return static_cast<int32_t>(x) < 0 ? std::numeric_limits<int32_t>::max() - x
: x;
};
std::vector<int32_t> temp_indices(input_size);
for (int64_t batch = 0; batch != batch_size; ++batch) {
std::iota(temp_indices.begin(), temp_indices.end(), 0);
const T* values_batch = values + batch * input_size;
auto kth_element = temp_indices.begin() + k;
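// Partially sort the index array so its first k entries reference the k largest values in descending order; ties keep the smaller index first.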
std::partial_sort(temp_indices.begin(), kth_element, temp_indices.end(),
[values_batch](size_t i1, size_t i2) {
int32_t v1 = convert_to_int(values_batch[i1]);
int32_t v2 = convert_to_int(values_batch[i2]);
if (v1 == v2) {
return i1 < i2;
}
return v1 > v2;
});
T* out_values_batch = out_values + batch * k;
int32_t* out_indices_batch = out_indices + batch * k;
std::copy(temp_indices.begin(), kth_element, out_indices_batch);
for (int64_t i = 0; i < k; i++) {
out_values_batch[i] = values_batch[temp_indices[i]];
}
}
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TopKF32(
int64_t batch_size, int64_t input_size, int64_t k, const float* values,
float* out_values, int32_t* out_indices) {
TopK(batch_size, input_size, k, values, out_values, out_indices);
} | #include <string_view>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/statusor.h"
namespace xla::cpu {
namespace {
class TopkTest : public HloTestBase {};
XLA_TEST_F(TopkTest, CustomCallTarget) {
std::string_view hlo_text_module = R"(
HloModule topk
ENTRY TopK {
x = f32[10,10] parameter(0)
ROOT topk = (f32[10,3], s32[10,3]) custom-call(x), custom_call_target="TopK"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_text_module));
auto input =
LiteralUtil::CreateR2<float>({{98, 21, 67, 27, 54, 67, 98, 84, 9, 62},
{65, 68, 49, 3, 9, 0, 52, 78, 36, 96},
{44, 50, 35, 62, 33, 19, 37, 26, 23, 90},
{34, 55, 10, 98, 19, 35, 11, 77, 25, 1},
{87, 19, 15, 98, 35, 90, 64, 60, 80, 12},
{8, 11, 77, 52, 76, 33, 39, 55, 74, 96},
{75, 69, 2, 85, 85, 65, 48, 29, 91, 25},
{26, 4, 76, 48, 88, 96, 71, 2, 58, 68},
{42, 90, 38, 86, 18, 0, 22, 28, 1, 39},
{90, 34, 63, 92, 30, 54, 3, 98, 85, 4}});
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&input}));
std::vector<Literal> results = result.DecomposeTuple();
ASSERT_EQ(results.size(), 2);
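// results[0] holds the top-3 values per row; results[1] holds the matching column indices.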
LiteralTestUtil::ExpectR2Equal<float>({{98, 98, 84},
{96, 78, 68},
{90, 62, 50},
{98, 77, 55},
{98, 90, 87},
{96, 77, 76},
{91, 85, 85},
{96, 88, 76},
{90, 86, 42},
{98, 92, 90}},
results[0]);
LiteralTestUtil::ExpectR2Equal({{0, 6, 7},
{9, 7, 1},
{9, 3, 1},
{3, 7, 1},
{3, 5, 0},
{9, 2, 4},
{8, 3, 4},
{5, 4, 2},
{1, 3, 0},
{7, 3, 0}},
results[1]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/runtime_topk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/runtime_topk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07a76b80-b96d-4919-abec-99658cb925d4 | cpp | tensorflow/tensorflow | onednn_matmul | third_party/xla/xla/service/cpu/onednn_matmul.cc | third_party/xla/xla/service/cpu/tests/onednn_matmul_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_matmul.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
#define EIGEN_USE_THREADS
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::matmul;
using dnnl::memory;
using dnnl::stream;
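// Describes the weights with format_tag::any and asks the matmul primitive descriptor which (possibly blocked) layout oneDNN would actually pick for them.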
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const dnnl::memory::desc& input_md,
const dnnl::memory::desc& weights_md, const dnnl::memory::desc& bias_md,
const dnnl::memory::desc& output_md) {
auto weights_any_md =
memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
dnnl::memory::format_tag::any);
auto matmul_pd = matmul::primitive_desc(engine, input_md, weights_any_md,
bias_md, output_md);
return matmul_pd.weights_desc();
}
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const Shape& input_shape,
const Shape& weights_shape, const Shape& bias_shape,
const Shape& output_shape, const OneDnnMatMulConfig* matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_b(), weights_md);
auto bias_md = absl::c_count(matmul_config->fusions().ops(),
OneDnnFusionConfig::BIAS) > 0
? ShapeToMemDesc(bias_shape)
: dnnl::memory::desc{};
auto output_md = ShapeToMemDesc(output_shape);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
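// oneDNN requires the bias rank to match the output rank, so pad missing leading dimensions with 1s.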
if (!bias_md.is_zero() && missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
return OneDnnMatMulOptWeightsDesc(engine, input_md, weights_md, bias_md,
output_md);
}
}
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config) {
engine cpu_engine(engine::kind::cpu, 0);
auto optimized_weights_md =
OneDnnMatMulOptWeightsDesc(cpu_engine, input_shape, weights_shape,
bias_shape, output_shape, matmul_config);
return MemDescToXlaShapeFlattened(optimized_weights_md);
}
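// Bundles the fused-operand buffers with the list of post-op memory arguments collected while building the primitive descriptor.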
struct FusedOperandsRef {
const std::vector<void*>& bufs;
std::vector<std::pair<int, dnnl::memory>>& postop_args;
};
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const engine& cpu_engine, const memory::desc& input_md,
const memory::desc& plain_weights_md, const memory::desc& output_md,
const std::vector<memory::desc>& fused_mds,
const OneDnnMatMulConfig& matmul_config,
FusedOperandsRef* fused_operands_ref = nullptr) {
auto bias_md = memory::desc();
bool weights_packed = matmul_config.optimization_config().weights_prepacked();
auto weights_md = plain_weights_md;
if (weights_packed) {
weights_md = memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
memory::format_tag::any);
}
dnnl::post_ops post_ops;
int fused_operand_idx = 0;
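// Translate each fused op from the backend config into a oneDNN post-op; BIAS and BINARY_ADD additionally bind an extra operand buffer when one is provided.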
for (auto& fused_op : matmul_config.fusions().ops()) {
switch (fused_op) {
case OneDnnFusionConfig::RELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_relu, 0.f, 0.f);
break;
case OneDnnFusionConfig::TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_ERF:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_erf, 0.f, 0.f);
break;
case OneDnnFusionConfig::RELU6:
post_ops.append_eltwise(dnnl::algorithm::eltwise_clip_v2, 0.f, 6.0f);
break;
case OneDnnFusionConfig::SIGMOID:
post_ops.append_eltwise(dnnl::algorithm::eltwise_logistic, 0.f, 0.f);
break;
case OneDnnFusionConfig::BIAS: {
bias_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
if (fused_operands_ref) {
fused_operands_ref->postop_args.emplace_back(
DNNL_ARG_BIAS,
dnnl::memory(bias_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
fused_operand_idx++;
} break;
case OneDnnFusionConfig::ELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_elu, 1.0f, 0.0f);
break;
case OneDnnFusionConfig::BINARY_ADD: {
auto binary_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - binary_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto binary_dims = binary_md.get_dims();
binary_dims.insert(binary_dims.begin(), missed_rank, 1);
binary_md = binary_md.reshape(binary_dims);
}
if (fused_operands_ref) {
auto arg_idx =
DNNL_ARG_ATTR_MULTIPLE_POST_OP(post_ops.len()) | DNNL_ARG_SRC_1;
fused_operands_ref->postop_args.emplace_back(
arg_idx,
dnnl::memory(binary_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
post_ops.append_binary(dnnl::algorithm::binary_add, binary_md);
fused_operand_idx++;
} break;
case OneDnnFusionConfig::LINEAR: {
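// alpha_typecast() carries the f32 scale's bit pattern in an int32 field; reinterpret it back to float before appending the eltwise_linear post-op.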
float const_float;
*(reinterpret_cast<int32_t*>(&const_float)) =
matmul_config.fusions().alpha_typecast();
post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, const_float,
0.f);
} break;
default:
LOG(FATAL) << __FILE__ << ":" << __LINE__
<< " Attempt to call OneDNN MatMul runtime library with "
"unsupported post op."
<< std::endl;
}
}
dnnl::primitive_attr attrs;
if (matmul_config.optimization_config().user_scratchpad()) {
attrs.set_scratchpad_mode(dnnl::scratchpad_mode::user);
}
if (post_ops.len() > 0) {
attrs.set_post_ops(post_ops);
}
return std::make_unique<matmul::primitive_desc>(
cpu_engine, input_md, weights_md, bias_md, output_md, attrs);
}
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const Shape& input_shape, const Shape& weights_shape,
const Shape& output_shape, const std::vector<Shape>& fused_shapes,
const OneDnnMatMulConfig& matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weights_md);
auto output_md = ShapeToMemDesc(output_shape);
std::vector<memory::desc> fused_mds;
std::transform(fused_shapes.begin(), fused_shapes.end(),
std::back_inserter(fused_mds),
[](const Shape& shape) { return ShapeToMemDesc(shape); });
return CreateMatMulPrimDesc(engine(engine::kind::cpu, 0), input_md,
weights_md, output_md, fused_mds, matmul_config);
}
template <>
typename PrimitiveTrait<kOnednnMatmulConfig>::pointer_type
GetKernelConfig<kOnednnMatmulConfig>(
absl::StatusOr<BackendConfig>* backend_config) {
return (*backend_config)->mutable_onednn_matmul_config();
}
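// Rebuilds the oneDNN matmul primitive descriptor from a matmul custom-call instruction; returns nullptr when the instruction is not a custom call or lacks a valid backend config.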
template <>
std::unique_ptr<dnnl::matmul::primitive_desc>
CreateOneDnnPrimDesc<dnnl::matmul::primitive_desc>(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kCustomCall) {
return nullptr;
}
auto custom_call = Cast<xla::HloCustomCallInstruction>(instr);
auto backend_config = custom_call->backend_config<BackendConfig>();
if (!backend_config.ok()) {
return nullptr;
}
auto& matmul_config = backend_config.value().onednn_matmul_config();
auto operands = custom_call->operands();
auto input = operands[0];
auto weight = operands[1];
auto input_shape = input->shape();
auto weight_shape = weight->shape();
auto output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
auto fused_operands =
HloInstruction::InstructionVector(operands.begin() + 2, operands.end());
std::vector<Shape> fused_shapes;
std::transform(fused_operands.begin(), fused_operands.end(),
std::back_inserter(fused_shapes),
[](const HloInstruction* instr) { return instr->shape(); });
return CreateMatMulPrimDesc(input_shape, weight_shape, output_shape,
fused_shapes, matmul_config);
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMul(
void* result, void* scratch, void** args) {
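// Layout of 'args': [0] argument count, [1] ExecutableRunOptions*, [2] serialized OneDnnMatMulConfig, [3] lhs memref, [4] weights memref, then memrefs for any fused operands.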
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weights_minfo(args[arg_indx++]);
MemrefInfo output_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weights_md = weights_minfo.GetOneDnnMemDesc();
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_a() && input_md.get_ndims() > 1, input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_b() && weights_md.get_ndims() > 1, weights_md);
auto output_md = output_minfo.GetOneDnnMemDesc();
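// Prepacked weights arrive as a flat buffer, so rebuild a nominal [K, N] descriptor here; the buffer is bound below using the optimized weights_desc() reported by the primitive descriptor.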
if (matmul_config.optimization_config().weights_prepacked()) {
weights_md =
memory::desc({input_md.get_dims().back(), output_md.get_dims().back()},
weights_md.get_data_type(), memory::format_tag::ab);
}
const int64_t num_fused_operands = num_args - arg_indx;
std::vector<memory::desc> fused_mds;
std::vector<void*> fused_bufs;
for (int64_t i = 0; i < num_fused_operands; ++i) {
MemrefInfo operand_minfo(args[arg_indx++]);
fused_mds.push_back(operand_minfo.GetOneDnnMemDesc());
fused_bufs.push_back(operand_minfo.Data());
}
std::vector<std::pair<int, dnnl::memory>> postop_args;
FusedOperandsRef fused_operands_ref{fused_bufs, postop_args};
auto matmul_pd =
CreateMatMulPrimDesc(cpu_engine, input_md, weights_md, output_md,
fused_mds, matmul_config, &fused_operands_ref);
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
auto lhs_mem = memory(input_md, cpu_engine, input_minfo.Data());
auto rhs_mem =
memory(matmul_pd->weights_desc(), cpu_engine, weights_minfo.Data());
auto result_mem = memory(output_md, cpu_engine, output_minfo.Data());
if (std::strstr(matmul_pd->impl_info_str(), "ref") != nullptr) {
LOG(WARNING) << "[Perf]: MatMul reference implementation being executed";
}
auto matmul_prim = matmul(*matmul_pd);
std::unordered_map<int, memory> matmul_args{{DNNL_ARG_SRC, lhs_mem},
{DNNL_ARG_WEIGHTS, rhs_mem},
{DNNL_ARG_DST, result_mem}};
if (matmul_config.optimization_config().user_scratchpad()) {
XLA_LIGHTWEIGHT_CHECK(scratch != nullptr);
MemrefInfo scratch_minfo(scratch);
auto scratchpad_md = matmul_pd->scratchpad_desc();
auto scratch_mem = memory(scratchpad_md, cpu_engine, scratch_minfo.Data());
matmul_args.insert({DNNL_ARG_SCRATCHPAD, scratch_mem});
}
matmul_args.insert(postop_args.begin(), postop_args.end());
matmul_prim.execute(onednn_stream, matmul_args);
}
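// Reorders plain weights into the optimized layout chosen by the matmul primitive descriptor (e.g. when prepacking constant weights).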
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMulReorder(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weight_minfo(args[arg_indx++]);
MemrefInfo output_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weight_md = weight_minfo.GetOneDnnMemDesc();
auto output_md = output_minfo.GetOneDnnMemDesc();
auto bias_md = dnnl::memory::desc{};
if (absl::c_count(matmul_config.fusions().ops(), OneDnnFusionConfig::BIAS) >
0) {
MemrefInfo bias_minfo(args[arg_indx++]);
bias_md = bias_minfo.GetOneDnnMemDesc();
}
XLA_LIGHTWEIGHT_CHECK(num_args >= arg_indx);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weight_md);
if (!bias_md.is_zero()) {
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
}
auto result_md = OneDnnMatMulOptWeightsDesc(cpu_engine, input_md, weight_md,
bias_md, output_md);
XLA_LIGHTWEIGHT_CHECK(result_minfo.GetOneDnnMemDesc().get_size() ==
result_md.get_size());
auto weight_mem = dnnl::memory{weight_md, cpu_engine, weight_minfo.Data()};
auto result_mem = dnnl::memory{result_md, cpu_engine, result_minfo.Data()};
dnnl::reorder rdr{weight_mem, result_mem};
rdr.execute(onednn_stream, weight_mem, result_mem);
onednn_stream.wait();
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_contraction_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
class MatmulTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
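// Force the non-thunk CPU runtime for these tests.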
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
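// Shared FileCheck patterns: the optimized HLO must contain a __onednn$matmul custom call whose backend config lists the expected fused ops.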
const char* fused_matmul_bias_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_binary_add_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* matmul_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_tanh_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_erf_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_elu_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","ELU"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_tanh_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_relu6_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","RELU6"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_sigmoid_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","SIGMOID"]
; CHECK-DAG: }
; CHECK: }
)";
};
TEST_F(MatmulTest, SimpleTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f32[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = bf16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = bf16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg.0 = f16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeB) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,128,64]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,128,64]{3,1,2,0} parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
reshape.2 = f32[32,32,40,30] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[32,32,30,40] broadcast(constant.3), dimensions={}
dot.7 = f32[32,32,40,40] dot(reshape.2, broadcast.4), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
constant.5 = f32[] constant(15)
broadcast.6 = f32[40] broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,32,40,40] broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broadcast.9)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[400,300] parameter(0), parameter_replication={false}
reshape.2 = f32[400,300] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[300,400] broadcast(constant.3), dimensions={}
dot.7 = f32[400,400] dot(reshape.2, broadcast.4), lhs_batch_dims={}, lhs_contracting_dims={1}, rhs_batch_dims={}, rhs_contracting_dims={0}
reshape.1 = f32[400,1,400] reshape(dot.7)
constant.5 = f32[] constant(15)
broadcast.6 = f32[400] broadcast(constant.5), dimensions={}
broadcast.9 = f32[400,1,400] broadcast(broadcast.6), dimensions={2}
add.10 = f32[400,1,400] add(reshape.1, broadcast.9)
tuple.12 = (f32[400,1,400]) tuple(add.10)
ROOT get-tuple-element.13 = f32[400,1,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[32,32,40,40] parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
add.10 = f32[32,32,40,40] add(dot.7, arg0.3)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[40]{0} parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[32,32,40,40] broadcast(arg0.3), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broad.1)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[2,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[2,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[2,400] parameter(2), parameter_replication={false}
dot.7 = f32[2,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[2,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[2,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[2,2,400,400] reshape(add.10)
tuple.12 = (f32[2,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[2,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D1B) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[1,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[1,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[1,400] parameter(2), parameter_replication={false}
dot.7 = f32[1,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[1,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[1,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[1,2,400,400] reshape(add.10)
tuple.12 = (f32[1,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[1,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter3) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[16,128,768] parameter(0), sharding={replicated}
arg0.2 = f32[768,768] parameter(1), sharding={replicated}
dot.84 = f32[16,128,768] dot(arg0.1, arg0.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
arg0.3 = f32[768]{0} parameter(2), sharding={replicated}
reshape.85 = f32[1,1,768] reshape(arg0.3)
broadcast.86 = f32[1,1,768] broadcast(reshape.85), dimensions={0,1,2}
reshape.87 = f32[768]{0} reshape(broadcast.86)
broadcast.88 = f32[16,128,768] broadcast(reshape.87), dimensions={2}
ROOT add.89 = f32[16,128,768] add(dot.84, broadcast.88)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeBWithBiasAddFusion) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,4,16]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,16,16]{3,1,2,0} parameter(1), parameter_replication={false}
dot.7 = f32[32,8,4,16]{3,2,1,0} dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
constant.5 = f32[] constant(15)
broadcast.6 = f32[16]{0} broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,8,4,16]{3,2,1,0} broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,8,4,16]{3,2,1,0} add(dot.7, broadcast.9)
reshape.11 = f32[32,8,4,16]{3,2,1,0} reshape(add.10)
tuple.12 = (f32[32,8,4,16]{3,2,1,0}) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,8,4,16]{3,2,1,0} get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, F32BiasAddFusionNonCompatibleBias) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.1 {
arg.0 = f32[12288,2] parameter(0), parameter_replication={false}
arg.1 = f32[2,1024] parameter(1), parameter_replication={false}
dot.0 = f32[12288,1024] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.0 = f32[32,384,1024] reshape(dot.0)
constant.0 = f32[1,384,1024] constant(15)
reshape.1 = f32[384,1024] reshape(constant.0)
broadcast.0 = f32[32,384,1024] broadcast(reshape.1), dimensions={1,2}
add.0 = f32[32,384,1024] add(reshape.0, broadcast.0)
tuple.0 = (f32[32,384,1024]) tuple(add.0)
ROOT get-tuple-element.0 = f32[32,384,1024] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, ApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, onednn.matmul.0)
mul.1 = f32[32,32,4,32] multiply(onednn.matmul.0, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.2 = f32[32,32,4,32] multiply(mul.1, bcast.0)
add.0 = f32[32,32,4,32] add(onednn.matmul.0, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
mul.3 = f32[32,32,4,32] multiply(add.0, bcast.1)
tanh = f32[32,32,4,32] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
add.2 = f32[32,32,4,32] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[32,32,4,32] broadcast(const.3), dimensions={}
mul.4 = f32[32,32,4,32] multiply(add.2, bcast.3)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.4)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
Arg_5.6 = f32[32,32,64] parameter(0), sharding={replicated}
Arg_7.8 = f32[64,256] parameter(1), sharding={replicated}
dot.232 = f32[32,32,256] dot(Arg_5.6, Arg_7.8), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_6.7 = f32[256] parameter(2), sharding={replicated}
reshape.233 = f32[1,1,256] reshape(Arg_6.7)
broadcast.234 = f32[1,1,256] broadcast(reshape.233), dimensions={0,1,2}
reshape.235 = f32[256] reshape(broadcast.234)
broadcast.236 = f32[32,32,256] broadcast(reshape.235), dimensions={2}
add.237 = f32[32,32,256] add(dot.232, broadcast.236)
multiply.238 = f32[32,32,256] multiply(add.237, add.237)
multiply.239 = f32[32,32,256] multiply(add.237, multiply.238)
constant.20 = f32[] constant(0.044715)
broadcast.21 = f32[32,32,256] broadcast(constant.20), dimensions={}
multiply.240 = f32[32,32,256] multiply(multiply.239, broadcast.21)
add.241 = f32[32,32,256] add(add.237, multiply.240)
constant.18 = f32[] constant(0.797884583)
broadcast.19 = f32[32,32,256] broadcast(constant.18), dimensions={}
multiply.242 = f32[32,32,256] multiply(add.241, broadcast.19)
tanh.243 = f32[32,32,256] tanh(multiply.242)
constant.16 = f32[] constant(1)
broadcast.17 = f32[32,32,256] broadcast(constant.16), dimensions={}
add.244 = f32[32,32,256] add(tanh.243, broadcast.17)
constant.14 = f32[] constant(0.5)
broadcast.15 = f32[32,32,256] broadcast(constant.14), dimensions={}
multiply.245 = f32[32,32,256] multiply(add.244, broadcast.15)
ROOT out = f32[32,32,256] multiply(add.237, multiply.245)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
dot.7 = f32[1024,256] dot(arg0.1, arg1.2), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast.9 = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.10 = f32[1024,256] add(dot.7, broadcast.9)
constant.12 = f32[] constant(0.044715)
broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f32[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f32[1024,256] multiply(add.10, add.10)
multiply.15 = f32[1024,256] multiply(multiply.14, multiply.11)
add.16 = f32[1024,256] add(add.10, multiply.15)
constant.17 = f32[] constant(0.797884583)
broadcast.18 = f32[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f32[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f32[1024,256] tanh(multiply.19)
constant.21 = f32[] constant(1)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
add.23 = f32[1024,256] add(tanh.20, broadcast.22)
constant.24 = f32[] constant(0.5)
broadcast.25 = f32[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f32[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f32[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
convert.9 = bf16[256,512] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.13 = f32[1024,256] add(convert, broadcast)
constant.16 = f32[] constant(0.044715)
broadcast.17 = f32[1024,256] broadcast(constant.16), dimensions={}
multiply.18 = f32[1024,256] multiply(broadcast.17, add.13)
multiply.15 = f32[1024,256] multiply(add.13, add.13)
multiply.19 = f32[1024,256] multiply(multiply.18, multiply.15)
add.20 = f32[1024,256] add(add.13, multiply.19)
constant.21 = f32[] constant(0.797884583)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
multiply.23 = f32[1024,256] multiply(add.20, broadcast.22)
tanh.24 = f32[1024,256] tanh(multiply.23)
constant.25 = f32[] constant(1)
broadcast.26 = f32[1024,256] broadcast(constant.25), dimensions={}
add.27 = f32[1024,256] add(tanh.24, broadcast.26)
constant.1 = f32[] constant(0.5)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
multiply.30 = f32[1024,256] multiply(add.13, broadcast.2)
ROOT multiply.32 = f32[1024,256] multiply(add.27, multiply.30)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f16[1024,512] parameter(0), parameter_replication={false}
reshape.4 = f16[1024,512] reshape(arg0.1)
arg1.2 = f16[256,512] parameter(1), parameter_replication={false}
reshape.5 = f16[256,512] reshape(arg1.2)
dot.7 = f16[1024,256] dot(reshape.4, reshape.5), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
transpose.8 = f16[1024,256] transpose(dot.7), dimensions={0,1}
arg2.3 = f16[256] parameter(2), parameter_replication={false}
reshape.6 = f16[256] reshape(arg2.3)
broadcast.9 = f16[1024,256] broadcast(reshape.6), dimensions={1}
add.10 = f16[1024,256] add(transpose.8, broadcast.9)
constant.12 = f16[] constant(0.044708)
broadcast.13 = f16[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f16[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f16[1024,256] multiply(add.10, add.10)
multiply.15 = f16[1024,256] multiply(multiply.14, multiply.11)
add.16 = f16[1024,256] add(add.10, multiply.15)
constant.17 = f16[] constant(0.79785)
broadcast.18 = f16[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f16[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f16[1024,256] tanh(multiply.19)
constant.21 = f16[] constant(1)
broadcast.22 = f16[1024,256] broadcast(constant.21), dimensions={}
add.23 = f16[1024,256] add(tanh.20, broadcast.22)
constant.24 = f16[] constant(0.5)
broadcast.25 = f16[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f16[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f16[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, ExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[] constant(0.707106769)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, bcast.0)
erf.0 = f32[32,32,4,32] erf(mul.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
add.0 = f32[32,32,4,32] add(erf.0, bcast.1)
const.2 = f32[] constant(0.5)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
mul.1 = f32[32,32,4,32] multiply(add.0, bcast.2)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.1)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
dot.378 = f32[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.11 = f32[32,197,3072] reshape(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.11, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
multiply.393 = f32[32,197,3072] multiply(add.390, broadcast.392)
multiply.394 = f32[32,197,3072] multiply(multiply.393, add.383)
ROOT out = f32[6304,3072] reshape(multiply.394)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactGELUTestBF16) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[6304,3072] broadcast(constant.381), dimensions={1}
add.383 = f32[6304,3072] add(convert.2, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[6304,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[6304,3072] multiply(broadcast.385, add.383)
erf.387 = f32[6304,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[6304,3072] broadcast(constant.388), dimensions={}
add.390 = f32[6304,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[6304,3072] broadcast(constant.391)
multiply.393 = f32[6304,3072] multiply(add.390, broadcast.392)
ROOT out = f32[6304,3072] multiply(multiply.393, add.383)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactJaxGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
reshape.0 = f32[32,197,3072] reshape(convert.2)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.0, broadcast.382)
constant.384 = f32[] constant(0.707182348)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
multiply.393 = f32[32,197,3072] multiply(add.390, add.383)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
ROOT multiply.394 = f32[32,197,3072] multiply(multiply.393, broadcast.392)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[512,256] parameter(1), parameter_replication={false}
convert.9 = bf16[512,256] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.13 = f32[1024,256] add(convert, broadcast)
constant.1 = f32[] constant(0.70703125)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
multiply.16 = f32[1024,256] multiply(add.13, broadcast.2)
erf.17 = f32[1024,256] erf(multiply.16)
constant.3 = f32[] constant(1)
broadcast.4 = f32[1024,256] broadcast(constant.3), dimensions={}
add.20 = f32[1024,256] add(erf.17, broadcast.4)
constant.5 = f32[] constant(0.5)
broadcast.6 = f32[1024,256] broadcast(constant.5), dimensions={}
multiply.23 = f32[1024,256] multiply(add.20, broadcast.6)
ROOT multiply.24 = f32[1024,256] multiply(add.13, multiply.23)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactGELUTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg.0 = f16[6304,768] parameter(0), parameter_replication={false}
arg.1 = f16[768,3072] parameter(1), parameter_replication={false}
dot.378 = f16[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.381 = f16[3072] constant(0.3)
broadcast.382 = f16[6304,3072] broadcast(constant.381), dimensions={1}
add.383 = f16[6304,3072] add(dot.378, broadcast.382)
constant.384 = f16[] constant(0.707106769)
broadcast.385 = f16[6304,3072] broadcast(constant.384), dimensions={}
multiply.386 = f16[6304,3072] multiply(broadcast.385, add.383)
erf.387 = f16[6304,3072] erf(multiply.386)
constant.388 = f16[] constant(1)
broadcast.389 = f16[6304,3072] broadcast(constant.388), dimensions={}
add.390 = f16[6304,3072] add(erf.387, broadcast.389)
constant.391 = f16[] constant(0.5)
broadcast.392 = f16[6304,3072] broadcast(constant.391)
multiply.393 = f16[6304,3072] multiply(add.390, broadcast.392)
ROOT out = f16[6304,3072] multiply(multiply.393, add.383)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, TestNonScalarConstantEltwiseLinearF32) {
const char* matmul_module_str = R"(
HloModule matmul.nonscalar.test.1
ENTRY matmul.nonscalar.test.f32 {
arg.0 = f32[16,400,500] parameter(0)
arg.1 = f32[16,500,3] parameter(1)
onednn.matmul.0 = f32[16,400,3] dot(arg.0, arg.1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
constant.0 = f32[3]{0} constant({0.625, 0.875, 0.375})
broadcast.0 = f32[16,400,3] broadcast(constant.0), dimensions={2}
ROOT mult.0 = f32[16,400,3] multiply(onednn.matmul.0, broadcast.0)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-NOT: "fusions":{
; CHECK-NOT: "ops":["LINEAR"]
; CHECK-NOT: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, ReLUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
relu.1 {
Arg_0.3 = f32[32,32,4,32] parameter(0)
constant.4 = f32[] constant(0)
broadcast.5 = f32[32,32,4,32] broadcast(constant.4), dimensions={}
ROOT maximum.6 = f32[32,32,4,32] maximum(Arg_0.3, broadcast.5)
}
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
ROOT call.7 = f32[32,32,4,32] call(onednn.matmul.0), to_apply=relu.1
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["RELU"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, SimpleBiasTestBF16_PARAM_F32) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.bf16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = bf16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f32[768,3072] parameter(1), sharding={replicated}
convert.5 = bf16[768,3072] convert(Arg_1.2)
dot.7 = bf16[16,128,3072] dot(convert.4, convert.5), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f32[3072] parameter(0), sharding={replicated}
convert.6 = bf16[3072] convert(Arg_0.1)
reshape.8 = bf16[1,1,3072] reshape(convert.6)
broadcast.9 = bf16[1,1,3072] broadcast(reshape.8), dimensions={0,1,2}
reshape.10 = bf16[3072] reshape(broadcast.9)
broadcast.11 = bf16[16,128,3072] broadcast(reshape.10), dimensions={2}
ROOT add.12 = bf16[16,128,3072] add(dot.7, broadcast.11)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleBiasTestBF16_PARAM_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.bf16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = bf16[16,128,768] convert(Arg_2.3)
Arg_1.2 = bf16[768,3072] parameter(1), sharding={replicated}
dot.5 = bf16[16,128,3072] dot(convert.4, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = bf16[3072] parameter(0), sharding={replicated}
reshape.6 = bf16[1,1,3072] reshape(Arg_0.1)
broadcast.7 = bf16[1,1,3072] broadcast(reshape.6), dimensions={0,1,2}
reshape.8 = bf16[3072] reshape(broadcast.7)
broadcast.9 = bf16[16,128,3072] broadcast(reshape.8), dimensions={2}
ROOT add.10 = bf16[16,128,3072] add(dot.5, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, DivisionByConstantWithEltwiseLinearF32) {
const char* matmul_module_str = R"(
HloModule matmul.divide.test.1
ENTRY matmul.divide.test.f32 {
Arg_4.5 = f32[16,128,768] parameter(0), sharding={replicated}
Arg_2.3 = f32[768,12,64] parameter(1), sharding={replicated}
onednn.matmul.0 = f32[16,128,12,64] dot(Arg_4.5, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.8 = f32[] constant(8)
broadcast.9 = f32[16,128,12,64] broadcast(constant.8), dimensions={}
ROOT divide.16 = f32[16,128,12,64] divide(onednn.matmul.0, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, SimpleBiasTestF16_PARAM_F32) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.f16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = f16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f32[768,3072] parameter(1), sharding={replicated}
convert.5 = f16[768,3072] convert(Arg_1.2)
dot.7 = f16[16,128,3072] dot(convert.4, convert.5), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f32[3072] parameter(0), sharding={replicated}
convert.6 = f16[3072] convert(Arg_0.1)
reshape.8 = f16[1,1,3072] reshape(convert.6)
broadcast.9 = f16[1,1,3072] broadcast(reshape.8), dimensions={0,1,2}
reshape.10 = f16[3072] reshape(broadcast.9)
broadcast.11 = f16[16,128,3072] broadcast(reshape.10), dimensions={2}
ROOT add.12 = f16[16,128,3072] add(dot.7, broadcast.11)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleBiasTestF16_PARAM_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.f16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = f16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f16[768,3072] parameter(1), sharding={replicated}
dot.5 = f16[16,128,3072] dot(convert.4, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f16[3072] parameter(0), sharding={replicated}
reshape.6 = f16[1,1,3072] reshape(Arg_0.1)
broadcast.7 = f16[1,1,3072] broadcast(reshape.6), dimensions={0,1,2}
reshape.8 = f16[3072] reshape(broadcast.7)
broadcast.9 = f16[16,128,3072] broadcast(reshape.8), dimensions={2}
ROOT add.10 = f16[16,128,3072] add(dot.5, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
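// With weights coming from a runtime parameter rather than a constant, the dot
// is still rewritten to the __onednn$matmul custom call, but no
// __onednn$matmul_reorder (weight pre-reordering) call should be emitted.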
TEST_F(MatmulTest, TestF32NonConstantWeights) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
arg.1 = f32[16,32] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul_reorder",
; CHECK: custom-call(%{{[a-z,A-Z,0-9,\.]*}}, %arg.1), custom_call_target="__onednn$matmul",
)");
}
TEST_F(MatmulTest, TestF32ConstantWeights) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
constant = f32[] constant(1)
arg.1 = f32[16,32] broadcast(constant), dimensions={}
ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul_reorder",
; CHECK: custom-call(%{{[a-z,A-Z,0-9,\.]*}}, %constant{{[a-z,A-Z,0-9,\.]*}}), custom_call_target="__onednn$matmul",
)");
}
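// The compare-against-zero / exponential-minus-one / select pattern in the
// tests below is the decomposed form of ELU; they expect it to be fused into
// the matmul together with the bias add (fused_matmul_bias_elu_rewrite_str_,
// presumably defined in the test fixture above).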
TEST_F(MatmulTest, BiasAddELUFusion_F32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,1024] parameter(0)
arg1.2 = f32[1024,1024] parameter(1)
dot.3 = f32[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f32[1024] parameter(2)
broadcast.5 = f32[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f32[1024,1024] add(dot.3, broadcast.5)
constant.7 = f32[] constant(0)
broadcast.8 = f32[1024,1024] broadcast(constant.7), dimensions={}
compare.9 = pred[1024,1024] compare(add.6, broadcast.8), direction=GT
exponential-minus-one.10 = f32[1024,1024] exponential-minus-one(add.6)
ROOT select.11 = f32[1024,1024] select(compare.9, add.6, exponential-minus-one.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddELUFusion_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0)
convert.2 = bf16[1024,512] convert(arg0.1)
arg1.3 = f32[256,512] parameter(1)
convert.4 = bf16[256,512] convert(arg1.3)
dot.5 = bf16[1024,256] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.6 = f32[1024,256] convert(dot.5)
arg2.7 = f32[256] parameter(2)
broadcast.8 = f32[1024,256] broadcast(arg2.7), dimensions={1}
add.9 = f32[1024,256] add(convert.6, broadcast.8)
constant.10 = f32[] constant(0)
broadcast.11 = f32[1024,256] broadcast(constant.10), dimensions={}
compare.12 = pred[1024,256] compare(add.9, broadcast.11), direction=GT
convert.13 = bf16[1024,256] convert(add.9)
exponential-minus-one.14 = f32[1024,256] exponential-minus-one(add.9)
convert.15 = bf16[1024,256] convert(exponential-minus-one.14)
select.16 = bf16[1024,256] select(compare.12, convert.13, convert.15)
ROOT convert.17 = f32[1024,256] convert(select.16)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddELUFusion_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg0.1 = f16[1024,1024] parameter(0)
arg1.2 = f16[1024,1024] parameter(1)
dot.3 = f16[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f16[1024] parameter(2)
broadcast.5 = f16[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f16[1024,1024] add(dot.3, broadcast.5)
constant.7 = f16[] constant(0)
broadcast.8 = f16[1024,1024] broadcast(constant.7), dimensions={}
compare.9 = pred[1024,1024] compare(add.6, broadcast.8), direction=GT
exponential-minus-one.10 = f16[1024,1024] exponential-minus-one(add.6)
ROOT select.11 = f16[1024,1024] select(compare.9, add.6, exponential-minus-one.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddELUFusion_F16_2) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg0.1 = f32[1024,1024] parameter(0)
convert.2 = f16[1024,1024] convert(arg0.1)
arg1.3 = f32[1024,1024] parameter(2)
convert.4 = f16[1024,1024] convert(arg1.3)
dot.5 = f16[1024,1024] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.6 = f32[1024] parameter(1)
convert.7 = f16[1024] convert(arg2.6)
broadcast.8 = f16[1024,1024] broadcast(convert.7), dimensions={1}
add.9 = f16[1024,1024] add(dot.5, broadcast.8)
constant.10 = f16[] constant(0)
broadcast.11 = f16[1024,1024] broadcast(constant.10), dimensions={}
compare.12 = pred[1024,1024] compare(add.9, broadcast.11), direction=GT
exponential-minus-one.13 = f16[1024,1024] exponential-minus-one(add.9)
select.14 = f16[1024,1024] select(compare.12, add.9, exponential-minus-one.13)
dot.15 = f16[1024,1024] dot(select.14, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT convert.16 = f32[1024,1024] convert(dot.15)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
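// The negate / exponential / add-one / divide chain below is the decomposed
// logistic sigmoid, 1 / (1 + exp(-x)); these tests expect it to be fused into
// the matmul along with the bias add (fused_matmul_bias_sigmoid_rewrite_str_).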
TEST_F(MatmulTest, SIGMOIDTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.f32
ENTRY matmul.bias.sigmoid.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(onednn.matmul.0, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 = (f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
TEST_F(MatmulTest, SIGMOIDTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.bf16
ENTRY matmul.bias.sigmoid.test.bf16 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
convert.0 = bf16[32,32,4,16] convert(arg.0)
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
convert.1 = bf16[32,32,16,32] convert(arg.1)
onednn.matmul.0 = bf16[32,32,4,32] dot(convert.0, convert.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert.2 = f32[32,32,4,32] convert(onednn.matmul.0)
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(convert.2, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 = (f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
TEST_F(MatmulTest, SIGMOIDTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.f16
ENTRY matmul.bias.sigmoid.test.f16 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
convert.0 = f16[32,32,4,16] convert(arg.0)
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
convert.1 = f16[32,32,16,32] convert(arg.1)
onednn.matmul.0 = f16[32,32,4,32] dot(convert.0, convert.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert.2 = f32[32,32,4,32] convert(onednn.matmul.0)
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(convert.2, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 = (f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16Gemv1) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[1000,10000] parameter(0)
arg.1 = bf16[10000] parameter(1)
ROOT onednn.matmul.0 = bf16[1000] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{2e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16Gemv2) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[100,300,300] parameter(0)
arg.1 = bf16[300] parameter(1)
ROOT onednn.matmul.0 = bf16[100,300] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{2e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
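// A dot that contracts over the second operand's minor dimension (an implicit
// transposed-B GEMM, [384,1024] x [2,1024]^T) is expected to stay as a plain
// dot here rather than being rewritten to the oneDNN custom call.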
TEST_F(MatmulTest, TestTransposeBNoRewriteF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[384,1024]{1,0} parameter(0), parameter_replication={false}
arg.1 = f32[2,1024]{1,0} parameter(1), parameter_replication={false}
ROOT dot.2 = f32[384,2]{1,0} dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul",
; CHECK: f32[384,2]{1,0} dot(%arg.0, %arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
)");
}
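// Multiplying the dot output by a scalar constant and then adding a broadcast
// constant should fuse as a LINEAR post-op followed by a BINARY_ADD, per the
// backend_config checks below.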
TEST_F(MatmulTest, SimpleTestF32WithMulAndAddFusion) {
const char* matmul_module_str = R"(
ENTRY matmul.mul.add.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,40,40] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,40,40] multiply(dot.7, bcast.0)
const.1 = f32[] constant(0.65)
bcast.1 = f32[32,32,40,40] broadcast(const.1), dimensions={}
add.0 = f32[32,32,40,40] add(mul.0, bcast.1)
const.2 = f32[] constant(0.65)
bcast.2 = f32[32,32,40,40] broadcast(const.2), dimensions={}
add.1 = f32[32,32,40,40] add(bcast.2, bcast.1)
tuple.12 = (f32[32,32,40,40]) tuple(add.0)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR","BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAddTanhFusionTest_F32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.f32
ENTRY matmul.bias.tanh.test.f32 {
arg.0 = f32[32,32,40,30] parameter(0)
arg.1 = f32[32,32,30,40] parameter(1)
dot.2 = f32[32,32,40,40] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.3 = f32[40] constant(15)
bcast.4 = f32[32,32,40,40] broadcast(const.3), dimensions={3}
add.5 = f32[32,32,40,40] add(dot.2, bcast.4)
tanh.6 = f32[32,32,40,40] tanh(add.5)
tuple.7 = (f32[32,32,40,40]) tuple(tanh.6)
ROOT get-tuple-element.8 = f32[32,32,40,40] get-tuple-element(tuple.7), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddTanhFusionTest_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.bf16
ENTRY matmul.bias.tanh.test.bf16 {
arg0.1 = f32[1024,512] parameter(0)
convert.2 = bf16[1024,512] convert(arg0.1)
arg1.3 = f32[256,512] parameter(1)
convert.4 = bf16[256,512] convert(arg1.3)
dot.5 = bf16[1024,256] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.6 = f32[1024,256] convert(dot.5)
arg2.7 = f32[256] parameter(2)
broadcast.8 = f32[1024,256] broadcast(arg2.7), dimensions={1}
add.9 = f32[1024,256] add(convert.6, broadcast.8)
ROOT tanh.10 = f32[1024,256] tanh(add.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddTanhFusionTest_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.f16
ENTRY matmul.bias.tanh.test.f16 {
arg0.1 = f16[1024,1024] parameter(0)
arg1.2 = f16[1024,1024] parameter(1)
dot.3 = f16[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f16[1024] parameter(2)
broadcast.5 = f16[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f16[1024,1024] add(dot.3, broadcast.5)
ROOT tanh.7 = f16[1024,1024] tanh(add.6)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
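// clamp(0, dot + bias, 6) is the ReLU6 pattern; these tests expect it to be
// fused with the bias add (fused_matmul_bias_relu6_rewrite_str_, presumably
// checking for a RELU6-style post-op in the backend config).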
TEST_F(MatmulTest, BiasAddRelu6Fusion_F32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.f32
ENTRY matmul.bias.relu6.test.f32 {
constant.1 = f32[] constant(0)
broadcast.2 = f32[1024,1024] broadcast(constant.1), dimensions={}
arg1.3 = f32[1024,1024] parameter(1)
arg2.4 = f32[1024,1024] parameter(0)
dot.5 = f32[1024,1024] dot(arg1.3, arg2.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg3.6 = f32[1024] parameter(2)
broadcast.7 = f32[1024,1024] broadcast(arg3.6), dimensions={1}
add.8 = f32[1024,1024] add(dot.5, broadcast.7)
constant.9 = f32[] constant(6)
broadcast.10 = f32[1024,1024] broadcast(constant.9), dimensions={}
ROOT clamp.11 = f32[1024,1024] clamp(broadcast.2, add.8, broadcast.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddRelu6Fusion_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.bf16
ENTRY matmul.bias.relu6.test.bf16 {
constant.1 = f32[] constant(0)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
arg0.3 = f32[1024,512] parameter(0)
convert.4 = bf16[1024,512] convert(arg0.3)
arg1.5 = f32[256,512] parameter(1)
convert.6 = bf16[256,512] convert(arg1.5)
dot.7 = bf16[1024,256] dot(convert.4, convert.6), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.8 = f32[1024,256] convert(dot.7)
arg2.9 = f32[256] parameter(2)
broadcast.10 = f32[1024,256] broadcast(arg2.9), dimensions={1}
add.11 = f32[1024,256] add(convert.8, broadcast.10)
constant.12 = f32[] constant(6)
broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
ROOT clamp.14 = f32[1024,256] clamp(broadcast.2, add.11, broadcast.13)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
TEST_F(MatmulTest, BiasAddRelu6Fusion_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.f16
ENTRY matmul.bias.relu6.test.f16 {
constant.1 = f16[] constant(0)
broadcast.2 = f16[1024,1024] broadcast(constant.1), dimensions={}
arg0.3 = f16[1024,1024] parameter(0)
arg1.4 = f16[1024,1024] parameter(1)
dot.5 = f16[1024,1024] dot(arg1.4, arg0.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.6 = f16[1024] parameter(2)
broadcast.7 = f16[1024,1024] broadcast(arg2.6), dimensions={1}
add.8 = f16[1024,1024] add(dot.5, broadcast.7)
constant.9 = f16[] constant(6)
broadcast.10 = f16[1024,1024] broadcast(constant.9), dimensions={}
ROOT clamp.11 = f16[1024,1024] clamp(broadcast.2, add.8, broadcast.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16WithMulAndAddFusion) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
ENTRY matmul.mul.add.test.bf16 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
convert0 = bf16[32,32,40,30] convert(arg0.1)
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
convert1 = bf16[32,32,30,40] convert(arg0.2)
dot.7 = bf16[32,32,40,40] dot(convert0, convert1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert2 = f32[32,32,40,40] convert(dot.7)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,40,40] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,40,40] multiply(convert2, bcast.0)
const.1 = f32[] constant(0.65)
bcast.1 = f32[32,32,40,40] broadcast(const.1), dimensions={}
add.0 = f32[32,32,40,40] add(mul.0, bcast.1)
convert3 = bf16[32,32,40,40] convert(add.0)
tuple.12 = (bf16[32,32,40,40]) tuple(convert3)
ROOT get-tuple-element.13 = bf16[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR","BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
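// With constant (broadcast-of-constant) weights, the rewriter should request
// weight pre-packing and a user-provided scratchpad, reflected in the
// "weights_prepacked":true,"user_scratchpad":true backend-config flags below.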
TEST_F(MatmulTest, WeightsPrepackAndScratch) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
constant = f32[] constant(1)
arg.1 = f32[16,32] broadcast(constant), dimensions={}
ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: %matmul.test.f32
; CHECK: custom_call_target="__onednn$matmul",
; CHECK-SAME: backend_config={
; CHECK-SAME: "outer_dimension_partitions":[],
; CHECK-SAME: "onednn_matmul_config":{
; CHECK-SAME: "weights_prepacked":true,"user_scratchpad":true
; CHECK-SAME: }
; CHECK-SAME: }
)");
}
TEST_F(MatmulTest, ColMajorBF16DotBeforeLayoutAssignment) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.colmajor.test
ENTRY matmul.colmajor.test.bf16 {
arg.0 = bf16[500,500]{0,1} parameter(0)
arg.1 = bf16[500,500]{1,0} parameter(1)
transpose.0 = bf16[500,500]{0,1} transpose(arg.1), dimensions={1,0}
ROOT dot.0 = bf16[500,500]{1,0} dot(arg.0, arg.1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-2, 1e-2)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: (bf16[500,500]{1,0}, u8[{{.*}}]{0})
; CHECK-SAME: custom_call_target="__onednn$matmul"
)");
}
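// Two back-to-back adds of broadcast constants after the dot; this test only
// verifies numerical correctness against the reference backend and does not
// check the optimized HLO.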
TEST_F(MatmulTest, ConsecutiveBinaryAdd) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[128,32,4,4] parameter(0)
arg0.2 = f32[128,32,4,4] parameter(1)
dot.7 = f32[128,32,4,4] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[128,32] constant({...})
bcast.1 = f32[128,32,4,4] broadcast(const.0), dimensions={0,1}
add.0 = f32[128,32,4,4] add(dot.7, bcast.1)
const.1 = f32[4] constant({1,2,3,4})
bcast.2 = f32[128,32,4,4] broadcast(const.1), dimensions={3}
add.1 = f32[128,32,4,4] add(add.0, bcast.2)
tuple.12 = (f32[128,32,4,4]) tuple(add.1)
ROOT get-tuple-element.13 = f32[128,32,4,4] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
}
TEST_F(MatmulTest, BroadcastedAddAfterFusion) {
const char* matmul_module_str = R"(
HloModule matmul.nonscalar.test
ENTRY matmul.nonscalar.test.f32 {
arg.0 = f32[16,400,500] parameter(0)
arg.1 = f32[16,500,3] parameter(1)
onednn.matmul.0 = f32[16,400,3] dot(arg.0, arg.1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
constant.0 = f32[] constant(6)
broadcast.0 = f32[16,400,3] broadcast(constant.0), dimensions={}
mult.0 = f32[16,400,3] multiply(onednn.matmul.0, broadcast.0)
constant.1 = f32[3]{0} constant({0.625, 0.875, 0.375})
broadcast.2 = f32[16,400,3] broadcast(constant.1), dimensions={2}
ROOT add.0 = f32[16,400,3] add(mult.0, broadcast.2)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_matmul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_matmul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc8b016d-3194-4fce-b2a4-a05da0640f45 | cpp | tensorflow/tensorflow | onednn_convolution | third_party/xla/xla/service/cpu/onednn_convolution.cc | third_party/xla/xla/service/cpu/tests/onednn_convolution_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_convolution.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <utility>
#include <vector>
#define EIGEN_USE_THREADS
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace {
using dnnl::algorithm;
using dnnl::convolution_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
}
dnnl::memory ReorderMemory(const dnnl::engine& engine,
const dnnl::memory::desc& dest_md,
dnnl::memory& src_mem,
const dnnl::stream& onednn_stream) {
auto dest_mem = memory(dest_md, engine);
dnnl::reorder(src_mem, dest_mem).execute(onednn_stream, src_mem, dest_mem);
return dest_mem;
}
dnnl::memory::format_tag GetFormatTag(const int dims) {
return (dims == 3) ? dnnl::memory::format_tag::nwc
: (dims == 4) ? dnnl::memory::format_tag::nhwc
: (dims == 5) ? dnnl::memory::format_tag::ndhwc
: dnnl::memory::format_tag::any;
}
template <>
typename PrimitiveTrait<kOnednnConvConfig>::pointer_type
GetKernelConfig<kOnednnConvConfig>(
absl::StatusOr<BackendConfig>* backend_config) {
return (*backend_config)->mutable_onednn_conv_config();
}
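// Runtime entry point for the __onednn$convolution custom call. The generated
// code passes, in order: the argument count, the ExecutableRunOptions, the
// serialized OneDnnConvolutionConfig proto, the input and kernel memrefs, and
// then any operands consumed by fused post-ops (bias, binary add); the result
// buffer arrives separately through 'result'.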
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnConvolution(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnConvolutionConfig conv_config;
conv_config.ParseFromString(config_str);
std::vector<int64_t> inp_perm_axes(conv_config.dims());
std::vector<int64_t> ker_perm_axes(conv_config.dims());
std::vector<int64_t> out_perm_axes(conv_config.dims());
int index_i = 0;
int index_o = 0;
int index_k = 0;
inp_perm_axes[conv_config.input().data().batch_dim()] = index_i++;
out_perm_axes[conv_config.output().data().batch_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().output_feature_dim()] = index_k++;
inp_perm_axes[conv_config.input().data().feature_dim()] = index_i++;
out_perm_axes[conv_config.output().data().feature_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().input_feature_dim()] = index_k++;
std::vector<int64_t> inp_dim_axes(
conv_config.input().data().spatial_dims().begin(),
conv_config.input().data().spatial_dims().end());
std::vector<int64_t> ker_dim_axes(
conv_config.kernel().filter().spatial_dims().begin(),
conv_config.kernel().filter().spatial_dims().end());
std::vector<int64_t> out_dim_axes(
conv_config.output().data().spatial_dims().begin(),
conv_config.output().data().spatial_dims().end());
std::for_each(inp_dim_axes.begin(), inp_dim_axes.end(),
[&inp_perm_axes, &index_i](int64_t& n) {
n -= 1;
inp_perm_axes[n] = index_i++;
});
std::for_each(ker_dim_axes.begin(), ker_dim_axes.end(),
[&ker_perm_axes, &index_k](int64_t& n) {
n -= 1;
ker_perm_axes[n] = index_k++;
});
std::for_each(out_dim_axes.begin(), out_dim_axes.end(),
[&out_perm_axes, &index_o](int64_t& n) {
n -= 1;
out_perm_axes[n] = index_o++;
});
memory::dims strides(conv_config.window().strides().begin(),
conv_config.window().strides().end());
memory::dims pad_left(conv_config.window().pad_left().begin(),
conv_config.window().pad_left().end());
memory::dims pad_right(conv_config.window().pad_right().begin(),
conv_config.window().pad_right().end());
memory::dims rhs_dilations(conv_config.window().window_dilations().begin(),
conv_config.window().window_dilations().end());
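// The window parameters in the config appear to be stored offset by one
// (dilations by two), presumably so that zero can mean "unset" in the proto;
// normalize them here to the values oneDNN expects.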
std::for_each(strides.begin(), strides.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_left.begin(), pad_left.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_right.begin(), pad_right.end(), [](int64_t& n) { n -= 1; });
std::for_each(rhs_dilations.begin(), rhs_dilations.end(),
[](int64_t& n) { n -= 2; });
auto groups = conv_config.feature_groups();
MemrefInfo inp_minfo(args[arg_indx++]);
MemrefInfo ker_minfo(args[arg_indx++]);
MemrefInfo res_minfo(result);
auto inp_md = inp_minfo.GetOneDnnMemDesc();
auto ker_md = ker_minfo.GetOneDnnMemDesc();
auto res_md = res_minfo.GetOneDnnMemDesc();
std::vector<int> inp_axes(inp_perm_axes.begin(), inp_perm_axes.end());
std::vector<int> ker_axes(ker_perm_axes.begin(), ker_perm_axes.end());
std::vector<int> out_axes(out_perm_axes.begin(), out_perm_axes.end());
auto new_inp_md = inp_md.permute_axes(inp_axes);
auto new_ker_md = ker_md.permute_axes(ker_axes);
auto new_res_md = res_md.permute_axes(out_axes);
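// For grouped convolutions, reshape the kernel so the group count becomes the
// leading dimension and the output-feature dimension is divided by the group
// count, presumably to match oneDNN's grouped weights layout.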
if (groups > 1) {
auto corr_dims = new_ker_md.get_dims();
corr_dims.insert(corr_dims.begin(), 1, groups);
corr_dims[1] = corr_dims[1] / groups;
new_ker_md = new_ker_md.reshape(corr_dims);
}
const int64_t num_fused_operands = num_args - arg_indx;
std::vector<memory::desc> fused_mds;
std::vector<void*> fused_bufs;
for (int64_t i = 0; i < num_fused_operands; ++i) {
MemrefInfo operand_minfo(args[arg_indx++]);
fused_mds.push_back(operand_minfo.GetOneDnnMemDesc());
fused_bufs.push_back(operand_minfo.Data());
}
std::vector<std::pair<int, dnnl::memory>> postop_args;
auto bias_md = memory::desc();
dnnl::post_ops post_ops;
int fused_operand_idx = 0;
for (auto& fused_op : conv_config.fusions().ops()) {
switch (fused_op) {
case OneDnnFusionConfig::BIAS: {
bias_md = fused_mds.at(fused_operand_idx);
postop_args.emplace_back(
DNNL_ARG_BIAS,
dnnl::memory(bias_md, cpu_engine, fused_bufs[fused_operand_idx]));
fused_operand_idx++;
} break;
case OneDnnFusionConfig::BINARY_ADD: {
auto binary_md = fused_mds.at(fused_operand_idx);
binary_md = binary_md.permute_axes(out_axes);
auto arg_idx =
DNNL_ARG_ATTR_MULTIPLE_POST_OP(post_ops.len()) | DNNL_ARG_SRC_1;
postop_args.emplace_back(
arg_idx,
dnnl::memory(binary_md, cpu_engine, fused_bufs[fused_operand_idx]));
post_ops.append_binary(dnnl::algorithm::binary_add, binary_md);
fused_operand_idx++;
} break;
default:
LOG(FATAL)
<< __FILE__ << ":" << __LINE__
<< " Attempt to call OneDNN Convolution runtime library with "
"unsupported post op."
<< std::endl;
}
}
auto any_ker_md =
memory::desc(new_ker_md.get_dims(), new_ker_md.get_data_type(),
dnnl::memory::format_tag::any);
auto any_inp_md =
memory::desc(new_inp_md.get_dims(), new_inp_md.get_data_type(),
GetFormatTag(new_inp_md.get_ndims()));
auto any_res_md =
memory::desc(new_res_md.get_dims(), new_res_md.get_data_type(),
GetFormatTag(new_res_md.get_ndims()));
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
dnnl::primitive_attr attrs;
if (post_ops.len() > 0) {
attrs.set_post_ops(post_ops);
}
auto conv_pd = std::make_unique<convolution_forward::primitive_desc>(
cpu_engine, prop_kind::forward_inference, algorithm::convolution_direct,
any_inp_md, any_ker_md, bias_md, any_res_md, strides, rhs_dilations,
pad_left, pad_right, attrs);
auto inp_mem = memory(new_inp_md, cpu_engine, inp_minfo.Data());
auto ker_mem = memory(new_ker_md, cpu_engine, ker_minfo.Data());
auto res_mem = memory(new_res_md, cpu_engine, res_minfo.Data());
auto new_inp_mem = (conv_pd->src_desc() == inp_mem.get_desc())
? inp_mem
: ReorderMemory(cpu_engine, conv_pd->src_desc(),
inp_mem, onednn_stream);
auto new_ker_mem = (conv_pd->weights_desc() == ker_mem.get_desc())
? ker_mem
: ReorderMemory(cpu_engine, conv_pd->weights_desc(),
ker_mem, onednn_stream);
auto new_res_mem = (conv_pd->dst_desc() == res_mem.get_desc())
? res_mem
: memory(conv_pd->dst_desc(), cpu_engine);
auto conv_prim = convolution_forward(*conv_pd);
std::unordered_map<int, memory> conv_args{{DNNL_ARG_SRC, new_inp_mem},
{DNNL_ARG_WEIGHTS, new_ker_mem},
{DNNL_ARG_DST, new_res_mem}};
conv_args.insert(postop_args.begin(), postop_args.end());
conv_prim.execute(onednn_stream, conv_args);
if (conv_pd->dst_desc() == res_mem.get_desc()) {
res_mem = new_res_mem;
} else {
dnnl::reorder(new_res_mem, res_mem)
.execute(onednn_stream, new_res_mem, res_mem);
}
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_contraction_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace xla {
namespace cpu {
class ConvolutionTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
const char* conv_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$convolution",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_conv_config":{
; CHECK-DAG: }
; CHECK: }
)";
const char* conv_rewrite_bias_str_ = R"(
; CHECK: custom_call_target="__onednn$convolution",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_conv_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_convolution_binary_add_ = R"(
; CHECK: custom_call_target="__onednn$convolution",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_conv_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
};
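// The FileCheck strings above assert that the convolution is rewritten to the
// __onednn$convolution custom call, with the "fusions" list in the backend
// config naming the post-ops (BIAS, BINARY_ADD) that were folded in.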
TEST_F(ConvolutionTest, Simple2DTestF32) {
const char* convolution_module_str = R"(
HloModule convolution.test.f32
ENTRY convolution.test.f32 {
arg.0 = f32[1,22,22,1] parameter(0)
reshape.0 = f32[1,22,22,1] reshape(arg.0)
arg.1 = f32[8,8,1,1] parameter(1)
reshape.1 = f32[8,8,1,1] reshape(arg.1)
convolution.0 = f32[1,11,11,1] convolution(reshape.0, reshape.1), window={size=8x8 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
reshape.2 = f32[1,11,11,1] reshape(convolution.0)
tuple.0 = (f32[1,11,11,1]) tuple(reshape.2)
ROOT get-tuple-element.0 = f32[1,11,11,1] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
TEST_F(ConvolutionTest, Simple3DTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* convolution_module_str = R"(
HloModule convolution.test.bf16
ENTRY convolution.test.bf16 {
p0 = bf16[8,4,5,5,1] parameter(0)
p1 = bf16[3,3,3,1,32] parameter(1)
ROOT conv = bf16[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
TEST_F(ConvolutionTest, Simple2DTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* convolution_module_str = R"(
HloModule convolution.test.f16
ENTRY convolution.test.f16 {
p0 = f16[8,4,5,5,1] parameter(0)
p1 = f16[3,3,3,1,32] parameter(1)
ROOT conv = f16[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
TEST_F(ConvolutionTest, Conv3DWithBiasBF16) {
const char* convolution_module_str = R"(
HloModule convolution.test.with.bias.relu.bf16.3D
ENTRY TestComputation {
arg.0 = bf16[15,4,5,5,28] parameter(0)
arg.1 = bf16[3,3,3,28,64] parameter(1)
conv = bf16[15,4,5,5,64] convolution(arg.0, arg.1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
bias = bf16[64] parameter(2)
broadcasted_bias = bf16[15,4,5,5,64] broadcast(bias), dimensions={4}
ROOT add = bf16[15,4,5,5,64] add(conv, broadcasted_bias)
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{0.01, 0.01}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_bias_str_);
}
TEST_F(ConvolutionTest, SimpleTestF32WithBinaryAddFusion1) {
const char* convolution_module_str = R"(
HloModule conv.binaryadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[1,22,22,1] parameter(0)
constant.3 = f32[] constant(1)
broadcast.4 = f32[8,8,1,1] broadcast(constant.3), dimensions={}
convolution.0 = f32[1,11,11,1] convolution(arg0.1, broadcast.4), window={size=8x8 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
constant.5 = f32[] constant(15)
broadcast.6 = f32[1] broadcast(constant.5), dimensions={}
broadcast.9 = f32[1,11,11,1] broadcast(broadcast.6), dimensions={3}
ROOT add.10 = f32[1,11,11,1] add(convolution.0, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, fused_convolution_binary_add_);
}
TEST_F(ConvolutionTest, SimpleTestBF16WithBiasAndAddFusion) {
const char* convolution_module_str = R"(
HloModule convolution.add.test.bf16
ENTRY convolution.add.test.bf16 {
arg0.1 = bf16[1,22,22,1] parameter(0)
arg0.2 = bf16[8,8,1,10] parameter(1)
convolution.0 = bf16[1,11,11,10] convolution(arg0.1, arg0.2), window={size=8x8 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
const.0 = bf16[10] constant(15)
bcast.1 = bf16[1,11,11,10] broadcast(const.0), dimensions={3}
add.0 = bf16[1,11,11,10] add(convolution.0, bcast.1)
const.1 = bf16[1,11,11,10] constant({...})
ROOT add.1 = bf16[1,11,11,10] add(add.0, const.1)
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_bias_str_);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_convolution.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_convolution_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
42ba027f-473e-4e70-a1e2-6fa80dbda591 | cpp | tensorflow/tensorflow | cpu_instruction_fusion | third_party/xla/xla/service/cpu/cpu_instruction_fusion.cc | third_party/xla/xla/service/cpu/cpu_instruction_fusion_test.cc | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
namespace xla {
namespace cpu {
namespace {
bool CanBeLoopFused(const HloInstruction& hlo) {
return hlo.IsElementwise() ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kConcatenate ||
hlo.opcode() == HloOpcode::kDynamicSlice ||
hlo.opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo.opcode() == HloOpcode::kGather ||
hlo.opcode() == HloOpcode::kIota || hlo.opcode() == HloOpcode::kPad ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kReshape ||
hlo.opcode() == HloOpcode::kReverse ||
hlo.opcode() == HloOpcode::kSlice ||
hlo.opcode() == HloOpcode::kTranspose;
}
bool IsNonComplexNonBatchedMatrixVectorDot(const HloInstruction* hlo) {
const Shape& hlo_shape = hlo->shape();
return !ShapeUtil::ElementIsComplex(hlo_shape) &&
hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 &&
hlo->dot_dimension_numbers().lhs_batch_dimensions_size() == 0;
}
bool HasExactlyOneUse(const HloInstruction& hlo_instr) {
return hlo_instr.user_count() == 1 &&
absl::c_count(hlo_instr.users().front()->operands(), &hlo_instr) == 1;
}
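// A non-complex, non-batched matrix-vector dot whose single use is a consuming
// add can be output-fused into that add.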
bool CanBeOutputFused(const HloInstruction* producer,
const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
IsNonComplexNonBatchedMatrixVectorDot(producer) &&
HasExactlyOneUse(*producer);
}
bool CanBeOutputFusedIntoSomeOperand(const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
(CanBeOutputFused(consumer->operand(0), consumer) ||
CanBeOutputFused(consumer->operand(1), consumer));
}
}
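// Fusion policy for the CPU backend: dot+add output fusion is always allowed;
// otherwise the producer must be loop-fusible, expensive producers are not
// fused into consumers that reuse their elements, small (< 16 KiB) vector
// operands may be fused into a matrix-vector dot, and reductions over major
// dimensions are never fused.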
FusionDecision CpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
int64_t operand_index) {
HloInstruction* producer = consumer->mutable_operand(operand_index);
VLOG(2) << "Considering for fusion: operand " << operand_index << " of "
<< consumer->ToString();
constexpr int kFusionThresholdBytes = 16 * 1024;
if (CanBeOutputFused(producer, consumer)) {
VLOG(2) << "Fusion OK: Can create output fusion.";
return FusionDecision::Allow();
}
if (CanBeOutputFusedIntoSomeOperand(producer)) {
return FusionDecision::Forbid(
"Bailing because producer can be output-fused into some operand.");
}
if (!CanBeLoopFused(*producer)) {
return FusionDecision::Forbid("Producer is not loop-fusible.");
}
if (producer->opcode() != HloOpcode::kFusion && is_expensive(*producer) &&
ReusesOperandElements(consumer, operand_index)) {
return FusionDecision::Forbid("Fusion is not profitable.");
}
RETURN_IF_NOT_FUSIBLE(InstructionFusion::ShouldFuse(consumer, operand_index));
if (producer->opcode() == HloOpcode::kConstant &&
consumer->opcode() != HloOpcode::kFusion) {
return FusionDecision::Forbid(
"Not fusing: insufficient non-constant nodes.");
}
if (producer->opcode() == HloOpcode::kFusion) {
return FusionDecision::Forbid(
"Not fusing: producer is itself a fusion node.");
}
if (consumer->opcode() == HloOpcode::kFusion) {
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
if (fusion_node_evaluations_.at(consumer).CodeDuplicationTooHigh(
producer)) {
return FusionDecision::Forbid("Code duplication too high");
}
}
if (consumer->opcode() == HloOpcode::kDot) {
const Shape& output_shape = consumer->shape();
if (output_shape.dimensions_size() <= 1) {
if (consumer->operand(0)->shape().rank() == 1 && operand_index == 1 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(0)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return FusionDecision::Allow();
} else if (consumer->operand(1)->shape().rank() == 1 &&
operand_index == 0 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(1)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return FusionDecision::Allow();
}
}
}
if (consumer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
consumer->dimensions(),
LayoutUtil::Minor(consumer->operand(0)->shape().layout(), 0))) {
return FusionDecision::Forbid(
"Not fusing reductions over major dimensions");
}
if (producer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
producer->dimensions(),
LayoutUtil::Minor(producer->operand(0)->shape().layout(), 0))) {
return FusionDecision::Forbid(
"Not fusing reductions over major dimensions");
}
if (consumer->IsLoopFusion()) {
VLOG(2) << "Fusing: consumer is a fusion node.";
return FusionDecision::Allow();
}
if (CanBeLoopFused(*consumer)) {
VLOG(2) << "Fusing: consumer is elementwise or fusible.";
return FusionDecision::Allow();
}
return FusionDecision::Forbid("Not fusing: not found a fusible case");
}
HloInstruction::FusionKind CpuInstructionFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
return CanBeOutputFused(producer, consumer)
? HloInstruction::FusionKind::kOutput
: HloInstruction::FusionKind::kLoop;
}
HloInstruction* CpuInstructionFusion::FuseInstruction(
HloInstruction* fusion_instruction, HloInstruction* producer) {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation = fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
}
} | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include <algorithm>
#include <memory>
#include <set>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/transpose_folding.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla::cpu {
namespace {
using InstructionFusionTest = HloTestBase;
std::unique_ptr<HloInstruction> MakeDot(const Shape& shape, HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(lhs->shape().rank() - 1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
precision_config);
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_0) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1024, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kExp, arg0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), exp0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_1) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Bitcast) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* bitcast0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kBitcast, exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), bitcast0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1024, 256}), exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), reshape0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
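// The exp feeding this dot is not fused: the contracted vector operand
// (f32[32*1024], 128 KiB) exceeds the 16 KiB matrix-vector fusion threshold.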
TEST_F(InstructionFusionTest, DotOperationFusion_TooLarge) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {32 * 1024}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32 * 1024, 256}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {32 * 1024, 256}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {256}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_ElementReuse) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {2, 1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_RHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[1024,256] parameter(1)
exponential = f32[1024,256] exponential(arg1)
transpose = f32[256,1024] transpose(exponential), dimensions={1,0}
ROOT dot = f32[1,1024] dot(arg0, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
1, 1));
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_LHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[256,1] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[1,256] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
0, 0));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_TransposeFusion_LHS_NonDefault) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[256,1] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
1, 0));
}
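// Fixture whose RunFusionAndCheckOpcodesWereFused helper runs the CPU fusion
// pass and asserts that the entry root becomes a fusion of the expected kind
// containing exactly the expected multiset of opcodes.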
class OpcodeFusionTest : public InstructionFusionTest {
protected:
void RunFusionAndCheckOpcodesWereFused(
HloModule* module, const std::multiset<HloOpcode>& expected_opcodes,
HloInstruction::FusionKind fusion_kind =
HloInstruction::FusionKind::kLoop) {
auto computation = module->entry_computation();
auto did_fusion = CpuInstructionFusion().Run(module);
ASSERT_TRUE(did_fusion.ok());
EXPECT_TRUE(did_fusion.value());
HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Fusion());
EXPECT_EQ(root->fusion_kind(), fusion_kind);
std::vector<HloOpcode> fused_opcodes(root->fused_instruction_count());
std::transform(root->fused_instructions().begin(),
root->fused_instructions().end(), fused_opcodes.begin(),
[](const HloInstruction* hlo) { return hlo->opcode(); });
EXPECT_EQ(
std::multiset<HloOpcode>(fused_opcodes.begin(), fused_opcodes.end()),
expected_opcodes);
}
HloComputation* CreateAdderToOne(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, arg0, one));
return module->AddEmbeddedComputation(builder.Build());
}
HloComputation* CreateMax(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* arg1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "arg1"));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, arg0, arg1));
return module->AddEmbeddedComputation(builder.Build());
}
};
TEST_F(OpcodeFusionTest, Exponential_Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {1, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* reshape2 =
builder.AddInstruction(HloInstruction::CreateReshape(result_shape, exp1));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Reshape_DynamicSlice_Tanh) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape starts_shape = ShapeUtil::MakeShape(S32, {});
Shape broadcast_shape = ShapeUtil::MakeShape(F32, {1, 8, 8});
Shape reshape_shape = ShapeUtil::MakeShape(F32, {8, 8});
Shape dynamic_slice_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, starts_shape, "starts"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, starts_shape, "starts"));
HloInstruction* broadcast2 = builder.AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, param0, {1}));
HloInstruction* reshape3 = builder.AddInstruction(
HloInstruction::CreateReshape(reshape_shape, broadcast2));
HloInstruction* dynamic_slice4 =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
dynamic_slice_shape, reshape3, {param1, param2}, {4, 4}));
builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_slice_shape, HloOpcode::kTanh, dynamic_slice4));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kTanh, HloOpcode::kDynamicSlice, HloOpcode::kReshape,
HloOpcode::kBroadcast, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape result_shape = ShapeUtil::MakeShape(F32, {8, 8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* broadcast1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(result_shape, param0, {1}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, broadcast1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kBroadcast, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, DynamicSlice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(S32, {});
Shape result_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, slice_shape, "starts"));
HloInstruction* dynamic_slice2 = builder.AddInstruction(
HloInstruction::CreateDynamicSlice(result_shape, param0, {param1}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, dynamic_slice2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kDynamicSlice,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, exp1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kExp, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {16});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(result_shape, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reverse_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reverse1 = builder.AddInstruction(
HloInstruction::CreateReverse(param_shape, param0, {0}));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, reverse1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReverse, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Slice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* slice1 = builder.AddInstruction(
HloInstruction::CreateSlice(slice_shape, param0, {0}, {4}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2}), HloOpcode::kNegate, slice1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kSlice, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Transpose_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {3, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* transpose2 = builder.AddInstruction(
HloInstruction::CreateTranspose(result_shape, exp1, {1, 0}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, transpose2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kTranspose, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, UnaryMapOfExp) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp}, CreateAdderToOne(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, BinaryMapOfExps) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param"));
HloInstruction* exp0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param1));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp0, exp1}, CreateMax(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kExp, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, DynamicSliceWithDynamicUpdateSlice) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 1, 1000});
std::vector<HloInstruction*> slice_indices, update_indices;
for (int i = 0; i < 3; ++i) {
slice_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
1 + i, ShapeUtil::MakeShape(U32, {}), "slice_indices")));
update_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
5 + i, ShapeUtil::MakeShape(U32, {}), "update_indices")));
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(0, full_shape, "slice_from")),
slice_indices,
{10, 1, 1000}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "to_update")),
slice, update_indices));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicUpdateSlice,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, MessOfFusibleNodes) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {4, 100, 10, 100, 50});
auto loop_idx = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param0"));
auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "param1"));
auto idx_choice = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}),
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}),
builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(S32, {4}), "param2")),
{loop_idx},
{1}))));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
auto slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(F32, {1, 100, 10, 100, 50}),
builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {100, 100, 10, 100, 50}), "param3")),
{idx_choice, zero, zero, zero, zero},
{1, 100, 10, 100, 50}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "param4")),
slice, {loop_idx, param1, param1, param1, param1}));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicSlice,
HloOpcode::kDynamicUpdateSlice, HloOpcode::kReshape,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter});
}
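// Builds add(dot(param0, param1), param2) as the entry computation, dropping
// to rank-1 shapes when m == 1 or n == 1 so the dot becomes a matrix-vector
// product. When `add_extra_use_for_dot` is true the dot result is also
// outfed, which gives it a second user and should prevent output fusion.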
void CreateComputationForDotAddOutputFusionTest(const std::string& test_name,
HloModule* module, int m, int k,
int n,
bool add_extra_use_for_dot) {
HloComputation::Builder builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
auto* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
auto* dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_rhs_shape, "param1"));
auto* addend = builder.AddInstruction(
HloInstruction::CreateParameter(2, dot_shape, "param2"));
auto* dot =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
builder.AddInstruction(
HloInstruction::CreateBinary(dot_shape, HloOpcode::kAdd, dot, addend));
if (add_extra_use_for_dot) {
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
HloInstruction::CreateOutfeed(dot_shape, dot, token, "no_config"));
}
module->AddEntryComputation(builder.Build());
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_1x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(), /*m=*/1,
                                             /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1_multi_use) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_DontOutputFuseDuplicateOperands) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[60,1]{1,0} parameter(1)
c = f32[50,1]{1,0} dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT d = f32[50,1]{1,0} add(c, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
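// Parameterized gather-fusion tests: each spec is a module in which a gather
// feeds an add of a broadcasted constant, and the test checks that the whole
// pattern collapses into a single loop fusion.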
struct GatherLoopFusionTestSpec {
std::string test_name;
std::string hlo_computation_text;
static std::string Name(
const ::testing::TestParamInfo<GatherLoopFusionTestSpec>& info) {
return info.param.test_name;
}
};
class GatherLoopFusionTest
: public OpcodeFusionTest,
public ::testing::WithParamInterface<GatherLoopFusionTestSpec> {};
TEST_P(GatherLoopFusionTest, GatherLoopFusion) {
const GatherLoopFusionTestSpec& spec = GetParam();
std::string hlo_string = absl::StrCat("HloModule ", spec.test_name, "\n\n",
spec.hlo_computation_text);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kGather, HloOpcode::kAdd, HloOpcode::kBroadcast,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter});
}
std::vector<GatherLoopFusionTestSpec> GetGatherLoopFusionTestSpecs() {
std::vector<GatherLoopFusionTestSpec> result;
result.push_back({"FusedTensorFlowGatherV2", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[3,2] broadcast(one), dimensions={}
ROOT result = s32[3,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[2,3,2] broadcast(one), dimensions={}
ROOT result = s32[2,3,2]{2,1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNdMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=2,
slice_sizes={1, 1}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNd_0", R"(
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1,2}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNd_1", R"(
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1,2}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedDynamicSlice", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[1,1] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
one = s32[] constant(1)
one_broadcasted = s32[1,1] broadcast(one), dimensions={}
ROOT result = s32[1,1]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedBatchDynamicSlice", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,1,1] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
one = s32[] constant(1)
one_broadcasted = s32[2,1,1] broadcast(one), dimensions={}
ROOT result = s32[2,1,1]{2,1,0} add(gather, one_broadcasted)
}
)"});
return result;
}
INSTANTIATE_TEST_SUITE_P(GatherLoopFusionTestInstantiation,
GatherLoopFusionTest,
::testing::ValuesIn(GetGatherLoopFusionTestSpecs()),
GatherLoopFusionTestSpec::Name);
TEST_F(InstructionFusionTest, NoFuseReduceMajor) {
absl::string_view module_string = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[50,60]{1,0} parameter(1)
c = f32[50,60]{1,0} add(a, b)
init = f32[] constant(0)
ROOT r = f32[60]{0} reduce(c, init), dimensions={0}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest, FuseReduceMinor) {
absl::string_view module_string = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[50,60]{1,0} parameter(1)
c = f32[50,60]{1,0} add(a, b)
init = f32[] constant(0)
ROOT r = f32[] reduce(c, init), dimensions={0,1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_TRUE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(), op::Fusion());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_instruction_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_instruction_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df3a25ed-d89e-4a8e-9675-4e294c559107 | cpp | tensorflow/tensorflow | shape_partition | third_party/xla/xla/service/cpu/shape_partition.cc | third_party/xla/xla/service/cpu/shape_partition_test.cc | #include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>
namespace xla {
namespace cpu {
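// Decides how many ways to split each outer (most-major first) dimension so
// that the product of the per-dimension partition counts approaches
// `target_partition_count`: dimensions are collected until their combined
// size covers the target, each count starts at roughly the D-th root of the
// target for the D collected dimensions, and counts are then raised
// dimension-by-dimension while staying within the target and dimension sizes.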
std::vector<int64_t> ShapePartitionAssigner::Run(
int64_t target_partition_count) {
std::vector<int64_t> outer_dims;
int64_t outer_dim_size = 1;
for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) {
const int64_t dimension = shape_.layout().minor_to_major(i);
outer_dims.push_back(dimension);
outer_dim_size *= shape_.dimensions(dimension);
if (outer_dim_size >= target_partition_count) {
break;
}
}
target_partition_count = std::min(outer_dim_size, target_partition_count);
const int64_t target_dim_partition_count = std::pow(
static_cast<double>(target_partition_count), 1.0 / outer_dims.size());
std::vector<int64_t> dimension_partition_counts(outer_dims.size());
for (int64_t i = 0; i < outer_dims.size(); ++i) {
dimension_partition_counts[i] =
std::min(static_cast<int64_t>(shape_.dimensions(outer_dims[i])),
target_dim_partition_count);
}
if (GetTotalPartitionCount(dimension_partition_counts) <
target_partition_count) {
for (int64_t i = 0; i < dimension_partition_counts.size(); ++i) {
const int64_t current_dim_partition_count = dimension_partition_counts[i];
const int64_t other_dims_partition_count =
GetTotalPartitionCount(dimension_partition_counts) /
current_dim_partition_count;
int64_t additional_partition_count =
target_partition_count / other_dims_partition_count -
current_dim_partition_count;
additional_partition_count = std::min(
shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i],
additional_partition_count);
if (additional_partition_count > 0) {
dimension_partition_counts[i] += additional_partition_count;
}
}
}
return dimension_partition_counts;
}
int64_t ShapePartitionAssigner::GetTotalPartitionCount(
const std::vector<int64_t>& dimension_partition_counts) {
int64_t total_partition_count = 1;
for (int64_t dim_partition_count : dimension_partition_counts) {
total_partition_count *= dim_partition_count;
}
return total_partition_count;
}
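// Precomputes, for each partitioned outer dimension, the dimension number,
// the nominal partition size (dimension size divided by partition count, at
// least 1), and the stride used to decode a linear partition index.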
ShapePartitionIterator::ShapePartitionIterator(
const Shape& shape, absl::Span<const int64_t> dimension_partition_counts)
: shape_(shape),
dimension_partition_counts_(dimension_partition_counts.begin(),
dimension_partition_counts.end()),
dimensions_(dimension_partition_counts_.size()),
dimension_partition_sizes_(dimension_partition_counts_.size()),
dimension_partition_strides_(dimension_partition_counts_.size()) {
for (int i = 0; i < dimensions_.size(); ++i) {
dimensions_[i] = shape_.layout().minor_to_major(
shape_.layout().minor_to_major_size() - 1 - i);
}
for (int i = 0; i < dimension_partition_sizes_.size(); ++i) {
const int64_t dim_size = shape_.dimensions(dimensions_[i]);
dimension_partition_sizes_[i] =
std::max(int64_t{1}, dim_size / dimension_partition_counts_[i]);
}
dimension_partition_strides_[dimension_partition_strides_.size() - 1] = 1;
for (int i = dimension_partition_strides_.size() - 2; i >= 0; --i) {
dimension_partition_strides_[i] = dimension_partition_strides_[i + 1] *
dimension_partition_counts_[i + 1];
}
}
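// Decodes a linear partition index into one (start, size) pair per
// partitioned dimension. The last partition along a dimension extends to the
// end of that dimension, so it may be larger than the nominal partition size.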
std::vector<std::pair<int64_t, int64_t>> ShapePartitionIterator::GetPartition(
int64_t index) const {
std::vector<std::pair<int64_t, int64_t>> partition(dimensions_.size());
for (int64_t i = 0; i < partition.size(); ++i) {
const int64_t partition_index = index / dimension_partition_strides_[i];
partition[i].first = partition_index * dimension_partition_sizes_[i];
if (partition_index == dimension_partition_counts_[i] - 1) {
partition[i].second =
shape_.dimensions(dimensions_[i]) - partition[i].first;
} else {
partition[i].second = dimension_partition_sizes_[i];
}
CHECK_GT(partition[i].second, 0);
index -= partition_index * dimension_partition_strides_[i];
}
return partition;
}
int64_t ShapePartitionIterator::GetTotalPartitionCount() const {
return ShapePartitionAssigner::GetTotalPartitionCount(
dimension_partition_counts_);
}
}
} | #include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <random>
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
namespace {
class ShapePartitionAssignerTest : public HloTestBase {
protected:
typedef std::vector<int64_t> Vec;
void RunR2Test(const Shape& shape, int64_t max_target_partition_count,
const std::vector<int64_t>* expected_partitions) {
ShapePartitionAssigner assigner(shape);
for (int64_t i = 1; i <= max_target_partition_count; ++i) {
      std::vector<int64_t> actual_partitions =
          assigner.Run(/*target_partition_count=*/i);
EXPECT_THAT(actual_partitions, expected_partitions[i - 1]);
}
}
};
TEST_F(ShapePartitionAssignerTest, Shape13WithLayout10) {
  // expected_partitions[i] is the expected assignment for target partition
  // count i + 1.
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 3}, {1, 0}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape31WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 1}, {0, 1}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout10) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3},
                                                {4}, {5}, {3, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}), 6,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3}, {2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {0, 1}), 4,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout210) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {4},
      {5},    {3, 2}, {3, 2}, {4, 2},
      {3, 3}, {3, 3}, {3, 3}, {4, 3},
      {4, 3}, {4, 3}, {5, 3}, {4, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}), 16,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout201) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {2, 2},
      {2, 2}, {3, 2}, {3, 2}, {3, 2},
      {3, 3}, {3, 3}, {3, 3}, {3, 4},
      {3, 4}, {3, 4}, {3, 5}, {3, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1}), 16,
expected_partitions);
}
class ShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
};
TEST_F(ShapePartitionIteratorTest, Shape53WithLayout10) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
{
ShapePartitionIterator iterator(shape, {1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 5}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2});
EXPECT_EQ(2, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 2}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(1)));
}
{
ShapePartitionIterator iterator(shape, {3});
EXPECT_EQ(3, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{1, 1}}), iterator.GetPartition(1)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(2)));
}
}
TEST_F(ShapePartitionIteratorTest, Shape532WithLayout210) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0});
{
ShapePartitionIterator iterator(shape, {1, 1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 5}, {0, 3}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2, 2});
EXPECT_EQ(4, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {1, 2}}), iterator.GetPartition(1)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {0, 1}}), iterator.GetPartition(2)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {1, 2}}), iterator.GetPartition(3)));
}
}
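// Randomized check: partitions a random rank-4 shape along one to three of
// its outer dimensions and verifies that, for every partitioned dimension,
// the emitted [start, start + size) ranges tile the whole dimension
// contiguously and without overlap.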
class RandomShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
RandomShapePartitionIteratorTest()
: generator_(rd_()), distribution_(1, 10) {}
std::vector<int64_t> RandR4Dims() { return {Rand(), Rand(), Rand(), Rand()}; }
int64_t Rand() { return distribution_(generator_); }
std::random_device rd_;
std::mt19937 generator_;
std::uniform_int_distribution<int> distribution_;
};
TEST_F(RandomShapePartitionIteratorTest, RandomShapeAndPartitions) {
Shape shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, RandR4Dims(), {3, 2, 1, 0});
const int num_outer_dims_to_partition = 1 + (Rand() % 3);
std::vector<int64_t> dim_sizes(num_outer_dims_to_partition);
std::vector<int64_t> dim_partition_counts(num_outer_dims_to_partition);
int64_t total_dim_size = 1;
for (int i = 0; i < num_outer_dims_to_partition; ++i) {
const int64_t dimension = shape.layout().minor_to_major(
shape.layout().minor_to_major_size() - 1 - i);
dim_sizes[i] = shape.dimensions(dimension);
total_dim_size *= dim_sizes[i];
const int64_t dim_partition_count = 1 + Rand() % dim_sizes[i];
dim_partition_counts[i] = dim_partition_count;
}
std::vector<std::map<int64_t, int64_t>> ranges(num_outer_dims_to_partition);
ShapePartitionIterator partition_iterator(shape, dim_partition_counts);
const int64_t partition_count = partition_iterator.GetTotalPartitionCount();
for (int64_t i = 0; i < partition_count; ++i) {
const auto& dim_partition = partition_iterator.GetPartition(i);
for (int dim = 0; dim < dim_partition.size(); ++dim) {
ranges[dim].insert(
std::make_pair(dim_partition[dim].first,
dim_partition[dim].first + dim_partition[dim].second));
}
}
for (int i = 0; i < ranges.size(); ++i) {
int64_t expected_index = 0;
for (auto& r : ranges[i]) {
EXPECT_EQ(expected_index, r.first);
expected_index = r.second;
}
EXPECT_EQ(expected_index, dim_sizes[i]);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/shape_partition.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/shape_partition_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb58629f-8bc9-481c-a8f4-361cf88721bc | cpp | tensorflow/tensorflow | parallel_task_assignment | third_party/xla/xla/service/cpu/parallel_task_assignment.cc | third_party/xla/xla/service/cpu/parallel_task_assignment_test.cc | #include "xla/service/cpu/parallel_task_assignment.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
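// Fallback cost model used when HloCostAnalysis fails on the entry
// computation: the task count scales with the byte size of the instruction's
// result, at roughly one task per 256 KiB, capped at max_parallelism_.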
class SimpleCostModel : public ParallelCostModel {
public:
SimpleCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size)
: max_parallelism_(max_parallelism), shape_size_(shape_size) {}
~SimpleCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
const int64_t instruction_cost = shape_size_(instruction->shape());
const int64_t min_cost_per_thread = 256LL << 10;
return std::min(
max_parallelism_,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
};
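// Cost model backed by HloCostAnalysis. Instructions with a flops-to-bytes
// ratio <= 1 are treated as memory bound: parallelism is capped at roughly
// sqrt(the available cores) and cost is measured in bytes accessed. Otherwise
// the cost is a weighted sum of flops, transcendental ops and bytes accessed.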
class DefaultCostModel : public ParallelCostModel {
public:
DefaultCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
std::unique_ptr<HloCostAnalysis> cost_analysis)
: max_parallelism_(max_parallelism),
shape_size_(shape_size),
cost_analysis_(std::move(cost_analysis)) {}
~DefaultCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
int64_t instruction_cost;
int64_t min_cost_per_thread;
int64_t max_parallelism;
const int64_t bytes_accessed =
std::max(int64_t{1}, cost_analysis_->bytes_accessed(*instruction));
const float flops_to_bytes_ratio =
cost_analysis_->flop_count(*instruction) /
static_cast<float>(bytes_accessed);
if (flops_to_bytes_ratio <= 1.0) {
max_parallelism = std::min<int64_t>(
max_parallelism_, std::ceil(std::sqrt(tsl::port::MaxParallelism())));
instruction_cost = bytes_accessed;
min_cost_per_thread = 256LL << 10;
} else {
max_parallelism = max_parallelism_;
instruction_cost =
1 * cost_analysis_->flop_count(*instruction) +
2 * cost_analysis_->transcendental_count(*instruction) +
10 * cost_analysis_->bytes_accessed(*instruction);
min_cost_per_thread = 100000;
}
return std::min(
max_parallelism,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
const std::unique_ptr<HloCostAnalysis> cost_analysis_;
};
ParallelTaskAssignment::ParallelTaskAssignment(
const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size, HloModule* module,
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {
VLOG(1) << "ParallelTaskAssignment max_parallelism: " << max_parallelism;
auto cost_analysis = std::make_unique<HloCostAnalysis>(shape_size);
HloComputation* computation = module->entry_computation();
absl::Status status =
computation->root_instruction()->Accept(cost_analysis.get());
if (status.ok()) {
cost_model_ = std::make_unique<DefaultCostModel>(
max_parallelism, shape_size, std::move(cost_analysis));
} else {
cost_model_ =
std::make_unique<SimpleCostModel>(max_parallelism, shape_size);
}
}
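// Returns 1 (no parallelism) for instructions that must not be partitioned
// here: potential in-place dynamic-update-slices, tuple-shaped results, Rng,
// and Constant. Element-wise ops, loop fusions and the listed data-movement /
// reduction ops get their task count from the cost model; convolutions only
// qualify when they cannot be lowered to the Eigen convolution runtime.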
int64_t ParallelTaskAssignment::GetTargetParallelTaskCount(
HloInstruction* instruction) {
auto opcode = instruction->opcode();
if (llvm_ir::MayBeImplementedAsInPlaceDynamicUpdateSlice(instruction) ||
instruction->shape().IsTuple() || opcode == HloOpcode::kRng ||
opcode == HloOpcode::kConstant) {
return 1;
}
if (instruction->IsElementwise() || instruction->IsLoopFusion() ||
opcode == HloOpcode::kBroadcast || opcode == HloOpcode::kConcatenate ||
opcode == HloOpcode::kDynamicSlice ||
opcode == HloOpcode::kDynamicUpdateSlice ||
opcode == HloOpcode::kGather || opcode == HloOpcode::kIota ||
opcode == HloOpcode::kPad || opcode == HloOpcode::kReduce ||
opcode == HloOpcode::kReduceWindow || opcode == HloOpcode::kReshape ||
opcode == HloOpcode::kReverse || opcode == HloOpcode::kSlice ||
opcode == HloOpcode::kTranspose ||
(opcode == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*instruction,
target_machine_features_))) {
return cost_model_->GetParallelTaskCount(instruction);
}
return 1;
}
absl::StatusOr<bool> ParallelTaskAssigner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "ParallelTaskAssigner ENTRY");
XLA_VLOG_LINES(3, module->ToString());
HloToParallelTasks hlo_to_parallel_tasks;
ComputeTargetParallelTasks(module, &hlo_to_parallel_tasks);
bool changed = AssignParallelTasks(module, hlo_to_parallel_tasks);
XLA_VLOG_LINES(2, "ParallelTaskAssigner EXIT");
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
bool ParallelTaskAssigner::AssignParallelTasks(
HloModule* module, const HloToParallelTasks& hlo_to_parallel_tasks) {
return AssignParallelTasksHelper(module, module->entry_computation(),
hlo_to_parallel_tasks);
}
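// Walks the computation, recursing into while bodies and called computations.
// Every instruction with a profitable partitioning is outlined into its own
// computation named "parallel_<instruction name>", and the chosen
// per-dimension partition counts are recorded in the outlined root's
// BackendConfig.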
bool ParallelTaskAssigner::AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks) {
bool changed = false;
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
for (auto* instruction : instructions) {
if (instruction->opcode() == HloOpcode::kWhile) {
changed |= AssignParallelTasksHelper(module, instruction->while_body(),
hlo_to_parallel_tasks);
continue;
} else if (instruction->opcode() == HloOpcode::kCall) {
changed |= AssignParallelTasksHelper(module, instruction->to_apply(),
hlo_to_parallel_tasks);
continue;
}
auto it = hlo_to_parallel_tasks.find(instruction);
if (it == hlo_to_parallel_tasks.end()) {
continue;
}
const int64_t target_parallel_task_count = (*it).second;
auto dim_partition_counts = ShapePartitionAssigner(instruction->shape())
.Run(target_parallel_task_count);
const int64_t total_partition_count =
ShapePartitionAssigner::GetTotalPartitionCount(dim_partition_counts);
if (total_partition_count <= 1) {
continue;
}
auto* call = module->OutlineExpressionFromComputation(
{instruction}, absl::StrCat("parallel_", instruction->name()),
computation);
auto* new_root = call->to_apply()->root_instruction();
BackendConfig backend_config;
absl::c_copy(dim_partition_counts,
tsl::protobuf::RepeatedFieldBackInserter(
backend_config.mutable_outer_dimension_partitions()));
TF_CHECK_OK(new_root->set_backend_config(backend_config));
VLOG(2) << "Assigned parallel task count: " << total_partition_count
<< " to instruction: " << new_root->name()
<< " parent: " << new_root->parent()->name();
changed = true;
}
return changed;
}
void ParallelTaskAssigner::ComputeTargetParallelTasks(
HloModule* module, HloToParallelTasks* hlo_to_parallel_tasks) {
ParallelTaskAssignment parallel_task_assignment(max_parallelism_,
shape_size_function_, module,
&target_machine_features_);
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instruction : computation->instructions()) {
const int64_t target_parallel_task_count =
parallel_task_assignment.GetTargetParallelTaskCount(instruction);
if (target_parallel_task_count > 1) {
hlo_to_parallel_tasks->insert(
{instruction, target_parallel_task_count});
}
}
}
}
}
} | #include "xla/service/cpu/parallel_task_assignment.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ParallelTaskAssignmentTest : public HloTestBase {
protected:
const int max_parallelism_ = 10;
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features_;
ParallelTaskAssignmentTest()
: HloTestBase(), target_machine_features_([](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
}) {}
absl::StatusOr<bool> RunParallelTaskAssigner(HloModule* module) {
return cpu::ParallelTaskAssigner(max_parallelism_, shape_size_func_,
&target_machine_features_)
.Run(module);
}
const HloCostAnalysis::ShapeSizeFunction shape_size_func_ =
cpu::CpuExecutable::ShapeSizeBytes;
};
TEST_F(ParallelTaskAssignmentTest, ReduceWindowParallelized) {
constexpr char hlo_string[] = R"(
HloModule m
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY e {
p0 = f32[512,256] parameter(0)
p1 = f32[] parameter(1)
ROOT reduce-window = f32[16,256] reduce-window(p0, p1),
window={size=32x1 stride=32x1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_TRUE(changed);
auto* reduce_window = FindInstruction(m.get(), HloOpcode::kReduceWindow);
TF_ASSERT_OK_AND_ASSIGN(auto backend_config,
reduce_window->backend_config<cpu::BackendConfig>());
EXPECT_EQ(backend_config.outer_dimension_partitions_size(), 1);
EXPECT_EQ(backend_config.outer_dimension_partitions(0), 2);
}
TEST_F(ParallelTaskAssignmentTest, DotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_Dot
ENTRY Dot {
dot_lhs = f32[196614,2]{1,0} parameter(0)
dot_rhs = f32[2,1]{1,0} parameter(1)
ROOT dot = f32[196614,1]{1,0} dot(dot_lhs, dot_rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest,
FusedComputationWithDotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_DotNestedInFusedComp
fused_computation.0 {
parameter.0 = f32[196614,2]{1,0} parameter(0)
parameter.0.1 = f32[2,1]{1,0} parameter(1)
parameter.0.2 = f32[196614,1]{1,0} parameter(2)
dot.0 = f32[196614,1]{1,0} dot(parameter.0, parameter.0.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT add.0 = f32[196614,1]{1,0} add(dot.0, parameter.0.2)
}
ENTRY DotNestedInFusedComp {
parameter = f32[196614,2]{1,0} parameter(0)
parameter.1 = f32[2,1]{1,0} parameter(1)
parameter.2 = f32[196614,1]{1,0} parameter(2)
ROOT fusion = f32[196614,1]{1,0} fusion(parameter, parameter.1,
parameter.2), kind=kOutput, calls=fused_computation.0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, RngOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_rng
ENTRY Rng {
src0 = f32[] parameter(0)
src1 = f32[] parameter(1)
ROOT rng0 = f32[1234567,2]{1,0} rng(f32[] src0, f32[] src1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InfeedOutfeedOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_infeed_outfeed
ENTRY InfeedOutfeed {
token0 = token[] after-all()
infeed0 = (u32[12345678,2]{1,0}, token[]) infeed(token0)
infeed0.data = u32[12345678,2]{1,0} get-tuple-element((u32[12345678,2]{1,0}, token[]) infeed0), index=0
ROOT outfeed0 = token[] outfeed(infeed0.data, token0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InPlaceDynamicUpdateSliceNotParallelized) {
const std::string hlo_string = R"(
HloModule test
body {
zero = s32[] constant(0)
one = s32[] constant(1)
ten = s32[] constant(10)
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
i = s32[] get-tuple-element(loop_carry), index=0
i_plus_ten = s32[] add(i, ten)
update = u32[1,100] get-tuple-element(loop_carry), index=1
data = u32[10000,100] get-tuple-element(loop_carry), index=2
new_data = u32[10000,100] dynamic-update-slice(data, update, i_plus_ten, zero)
new_i = s32[] add(i, one)
ROOT tuple = (s32[], u32[1,100], u32[10000,100]) tuple(new_i, update, new_data)
}
cond {
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
two = s32[] constant(2)
i = s32[] get-tuple-element(loop_carry), index=0
ROOT less-than = pred[] compare(i, two), direction=LT
}
ENTRY test {
zero = s32[] constant(0)
initial_i = s32[] parameter(0)
update = u32[1,100] parameter(1)
data = u32[10000,100] parameter(2)
tuple = (s32[], u32[1,100], u32[10000,100]) tuple(initial_i, update, data)
ROOT while = (s32[], u32[1,100], u32[10000,100]) while(tuple), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, AllReduceNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_allreduce
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[1234567] parameter(0)
ROOT crs = f32[1234567] all-reduce(input), replica_groups={}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, ConstantNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_constant
ENTRY const {
ROOT constant = f32[1234567] constant({...})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/parallel_task_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/parallel_task_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb500ced-65dc-4fb0-889f-5da1a2768a2a | cpp | tensorflow/tensorflow | alias_analysis | third_party/xla/xla/service/llvm_ir/alias_analysis.cc | third_party/xla/xla/service/llvm_ir/alias_analysis_test.cc | #include "xla/service/llvm_ir/alias_analysis.h"
#include <map>
#include "absl/container/flat_hash_set.h"
#include "llvm/IR/MDBuilder.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/hlo_value.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace llvm_ir {
static const BufferAllocation* kParameterAllocation = new BufferAllocation(
    /*index=*/-1, /*size=*/0, LogicalBuffer::Color(0));
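// Attaches alias.scope, noalias and invariant.load metadata (each gated on
// its debug option) to `array`, based on the buffer slice that buffer
// assignment gave to `hlo` at `index`. Entry-computation parameters share the
// kParameterAllocation sentinel slice and are additionally marked invariant
// over the whole program.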
void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
const ShapeIndex& index) {
BufferAllocation::Slice buffer_slice;
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
buffer_slice = BufferAllocation::Slice(kParameterAllocation, 0, 0);
} else {
auto unique_slice = assignment_.GetUniqueSlice(&hlo, index);
if (!unique_slice.ok()) {
return;
}
buffer_slice = unique_slice.value();
}
if (module_.config().debug_options().xla_llvm_enable_alias_scope_metadata()) {
llvm::MDNode*& alias_scope_md = alias_scope_metadata_[buffer_slice];
if (alias_scope_md == nullptr) {
alias_scope_md =
GetAliasScopeMetadataForBuffer(buffer_slice, GetAliasDomain());
}
if (alias_scope_md != nullptr) {
array->AddAliasScopeMetadata(alias_scope_md);
}
}
if (module_.config().debug_options().xla_llvm_enable_noalias_metadata()) {
llvm::MDNode*& noalias_md = noalias_metadata_[{buffer_slice, &hlo}];
if (noalias_md == nullptr) {
noalias_md = GetNoaliasMetadataForBuffer(buffer_slice, GetAliasDomain(),
assignment_, hlo);
}
if (noalias_md != nullptr) {
array->AddNoaliasMetadata(noalias_md);
}
}
if (module_.config()
.debug_options()
.xla_llvm_enable_invariant_load_metadata()) {
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
array->MarkInvariantOverWholeProgram(context_);
}
}
}
llvm::MDNode* AliasAnalysis::GetAliasDomain() {
llvm::MDBuilder metadata_builder(*context_);
if (alias_domain_ == nullptr) {
alias_domain_ =
metadata_builder.createAliasScopeDomain("XLA global AA domain");
}
return alias_domain_;
}
llvm::MDNode* AliasAnalysis::GetAliasScopeMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain) {
if (buffer_slice.allocation() == kParameterAllocation) {
return nullptr;
}
llvm::MDBuilder metadata_builder(domain->getContext());
llvm::MDNode* scope = metadata_builder.createAliasScope(
"buffer: " + buffer_slice.ToString(), domain);
llvm::MDNode* scope_list = llvm::MDNode::get(domain->getContext(), scope);
return scope_list;
}
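// Builds the noalias scope list for `buffer_slice`: collects the assigned
// buffers of `hlo`, its operands, its users and their operands, keeps the
// slices that do not overlap `buffer_slice` (capped at 500 entries), and
// emits one alias scope per surviving slice.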
llvm::MDNode* AliasAnalysis::GetNoaliasMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain,
const BufferAssignment& assignment, const HloInstruction& hlo) {
std::vector<const HloValue*> worklist;
absl::flat_hash_set<const HloInstruction*> added_to_worklist;
auto add_buffers_to_worklist =
[&](const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kParameter) {
return;
}
if (added_to_worklist.contains(instruction)) {
return;
}
added_to_worklist.insert(instruction);
ShapeUtil::ForEachSubshape(
instruction->shape(),
            [&](const Shape& /*subshape*/, const ShapeIndex& index) {
for (const HloValue* buffer :
assignment.GetSourceBuffers(instruction, index)) {
if (assignment.HasAllocation(*buffer)) {
worklist.push_back(buffer);
}
}
});
};
for (HloInstruction* user : hlo.users()) {
add_buffers_to_worklist(user);
for (HloInstruction* operand : user->operands()) {
add_buffers_to_worklist(operand);
}
}
add_buffers_to_worklist(&hlo);
for (HloInstruction* operand : hlo.operands()) {
add_buffers_to_worklist(operand);
}
std::set<BufferAllocation::Slice> buffers;
for (const HloValue* buffer : worklist) {
const BufferAllocation::Slice noalias_slice =
assignment.GetAssignedAllocation(*buffer).GetSlice(*buffer);
if (!buffer_slice.OverlapsWith(noalias_slice)) {
buffers.insert(noalias_slice);
constexpr int kMaxNoAliasSetSize = 500;
if (buffers.size() >= kMaxNoAliasSetSize) {
break;
}
}
}
if (buffers.empty()) {
return nullptr;
}
llvm::MDBuilder metadata_builder(domain->getContext());
std::vector<llvm::Metadata*> scopes;
for (const BufferAllocation::Slice noalias_slice : buffers) {
llvm::MDNode* scope = metadata_builder.createAliasScope(
"buffer: " + noalias_slice.ToString(), domain);
scopes.push_back(scope);
}
llvm::MDNode* noalias_list =
llvm::MDNode::get(domain->getContext(), AsArrayRef(scopes));
return noalias_list;
}
}
} | #include "absl/status/status.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/cpu/tests/cpu_codegen_test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
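// Codegen test fixture that turns off the CPU thunk runtime via the debug
// options before compiling the module below, so the FileCheck patterns match
// the IR produced by the non-thunk pipeline.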
class AliasAnalysisTest : public CpuCodegenTest {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
};
static absl::Status FakeCustomCallTarget(ffi::AnyBuffer,
ffi::Result<ffi::AnyBuffer>) {
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFakeCustomCallTarget, FakeCustomCallTarget,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
                           .Ret<ffi::AnyBuffer>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$FakeCustomCallTarget", "Host",
kFakeCustomCallTarget);
TEST_F(AliasAnalysisTest, EmbeddedComputationParamsMayAliasTemps) {
const char* hlo_string = R"(
HloModule while
body {
const.0.125 = f32[] constant(0.125)
body.state = f32[] parameter(0)
ROOT add.2.2 = f32[] add(const.0.125, body.state)
}
condition {
const.100 = f32[] constant(100)
condition.state = f32[] parameter(0)
addend = f32[] custom-call(condition.state), custom_call_target="__xla_test$$FakeCustomCallTarget", api_version=API_VERSION_TYPED_FFI
add = f32[] add(addend, condition.state)
ROOT greater-than = pred[] compare(const.100, add), direction=GT
}
ENTRY while3 {
const.0 = f32[] constant(0)
ROOT while = f32[] while(const.0), condition=condition, body=body
}
)";
CompileAndVerifyIr(hlo_string, R"(
; CHECK-LABEL: @body(ptr %retval
; CHECK: %[[add_result:.*]] = fadd float %[[fadd_lhs:.*]], %[[fadd_rhs:.*]]
; CHECK: store float %[[add_result]], ptr %[[store_dest:.*]], align 4, !alias.scope ![[alias_scope_md_for_store:[0-9]+]]
;
; CHECK-LABEL: @condition(ptr %retval, ptr noalias %run_options, ptr noalias %params
; CHECK: %[[cond_state_buf_ptr:.*]] = getelementptr inbounds ptr, ptr %buffer_table, i64 0
; CHECK: %[[cond_state_buf_untyped:.*]] = load ptr, ptr %[[cond_state_buf_ptr]]
; CHECK: load float, ptr %[[cond_state_buf_untyped]], align 4, !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
;
; CHECK-LABEL: @while3(
![[alias_scope_md_for_store]] = !{![[buffer_idx_0:.*]]}
![[buffer_idx_0]] = !{!"buffer: {index:0, offset:0, size:4}", ![[aa_md_root:.*]]}
![[aa_md_root]] = !{!"XLA global AA domain"}
![[buffer_idx_1:.*]] = !{!"buffer: {index:1, offset:0, size:4}", !3}
![[buffer_idx_1_offset_16:.*]] = !{!"buffer: {index:1, offset:16, size:1}", !3}
![[noalias_md_for_load]] = !{![[buffer_idx_1_offset_16]], ![[buffer_idx_1]]}
}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/alias_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/alias_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93f94341-4b0e-4bc2-912e-7f6500b6da69 | cpp | tensorflow/tensorflow | math_ops | tensorflow/c/experimental/ops/math_ops.cc | tensorflow/core/ops/math_ops_test.cc | #include "tensorflow/c/experimental/ops/math_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
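// Each wrapper below follows the same generated pattern: create an
// AbstractOperation for the named op, optionally attach the debug name, add
// the inputs and any attributes, then Execute expecting a single result.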
Status Mul(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Mul", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Conj(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Conj", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status AddV2(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("AddV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status MatMul(AbstractContext* ctx, AbstractTensorHandle* const a,
AbstractTensorHandle* const b, AbstractTensorHandle** product,
bool transpose_a, bool transpose_b, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("MatMul", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(a));
TF_RETURN_IF_ERROR(op_ptr->AddInput(b));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_a", transpose_a));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_b", transpose_b));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(product, 1), &num_retvals);
}
Status Neg(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Neg", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Sum(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle* const reduction_indices,
AbstractTensorHandle** output, bool keep_dims, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sum", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->AddInput(reduction_indices));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("keep_dims", keep_dims));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status Sub(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sub", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Div(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Div", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status DivNoNan(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("DivNoNan", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Exp(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Exp", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Sqrt(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sqrt", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status SqrtGrad(AbstractContext* ctx, AbstractTensorHandle* const y,
AbstractTensorHandle* const dy, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("SqrtGrad", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
TF_RETURN_IF_ERROR(op_ptr->AddInput(dy));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Log1p", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(MathOpsTest, AddN_ShapeFn) {
ShapeInferenceTestOp op("AddN");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "AddN")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_OK(op, "?;?", "in0|in1");
INFER_OK(op, "[1];[?]", "in0");
INFER_OK(op, "[1];?", "in0");
INFER_OK(op, "[?];[1]", "in1");
INFER_OK(op, "?;[1]", "in1");
set_n(2);
INFER_OK(op, "[1,2];[?,2]", "in0");
INFER_OK(op, "[1,2];[1,2]", "in0|in1");
INFER_OK(op, "[?,2];[1,2]", "in1");
set_n(3);
INFER_OK(op, "[1,?];[?,2];[1,2]", "in2");
INFER_OK(op, "[1,2];[?,2];[1,?]", "in0");
INFER_OK(op, "?;?;[1,2]", "in2");
set_n(2);
INFER_OK(op, "?;[1,2]", "in1");
INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]");
INFER_OK(op, "[?,2,?];[?,?,3]", "[d0_0|d1_0,d0_1,d1_2]");
INFER_OK(op, "[?,2];[1,?]", "[d1_0,d0_1]");
set_n(3);
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 2 and 4", op,
"[1,2];?;[1,4]");
INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2];?;[1,4]");
set_n(4);
INFER_ERROR("Shapes must be equal rank, but are 2 and 3", op,
"?;[1,2];?;[1,2,3]");
INFER_ERROR("From merging shape 1 with other shapes.", op,
"?;[1,2];?;[1,2,3]");
}
TEST(MathOpsTest, UnchangedShape_ShapeFn) {
ShapeInferenceTestOp op("Cast");
INFER_OK(op, "?", "in0");
INFER_OK(op, "[?]", "in0");
INFER_OK(op, "[1,?,3,4]", "in0");
}
TEST(MathOpsTest, Segment_ShapeFn) {
for (const auto* op_name : {"SegmentMax", "SegmentMean", "SegmentMin",
"SegmentProd", "SegmentSum"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[100]", "?");
INFER_OK(op, "[?];?", "[?]");
INFER_OK(op, "[?];[100]", "[?]");
INFER_OK(op, "[1];?", "[?]");
INFER_OK(op, "[1];[100]", "[?]");
INFER_OK(op, "[?,?];?", "[?,d0_1]");
INFER_OK(op, "[?,2];[100]", "[?,d0_1]");
INFER_OK(op, "[?,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?];?", "[?,d0_1]");
INFER_OK(op, "[1,2];[100]", "[?,d0_1]");
INFER_OK(op, "[1,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1]");
}
}
TEST(MathOpsTest, BroadcastBinaryOps_ShapeFn) {
auto test_shapes = [&](ShapeInferenceTestOp& op,
bool incompatible_shape_error) {
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,2];?", "?");
INFER_OK(op, "?;[1,2]", "?");
INFER_OK(op, "[?];[1]", "[d0_0]");
INFER_OK(op, "[1];[?]", "[d1_0]");
INFER_OK(op, "[?];[2]", incompatible_shape_error ? "[d1_0]" : "?");
INFER_OK(op, "[2];[?]", incompatible_shape_error ? "[d0_0]" : "?");
INFER_OK(op, "[?];[?]", "[?]");
INFER_OK(op, "[];[?]", "[d1_0]");
INFER_OK(op, "[?];[]", "[d0_0]");
INFER_OK(op, "[1];[1]", "[d0_0|d1_0]");
INFER_OK(op, "[];[1]", "[d1_0]");
INFER_OK(op, "[1];[]", "[d0_0]");
INFER_OK(op, "[2];[2]", "[d0_0|d1_0]");
INFER_OK(op, "[];[2]", "[d1_0]");
INFER_OK(op, "[1];[2]", "[d1_0]");
INFER_OK(op, "[2];[1]", "[d0_0]");
INFER_OK(op, "[2];[]", "[d0_0]");
INFER_OK(op, "[2];[?]", incompatible_shape_error ? "[d0_0]" : "?");
INFER_OK(op, "[0];[0]", "[d0_0|d1_0]");
INFER_OK(op, "[];[0]", "[d1_0]");
INFER_OK(op, "[1];[0]", "[d1_0]");
INFER_OK(op, "[0];[1]", "[d0_0]");
INFER_OK(op, "[0];[]", "[d0_0]");
INFER_OK(op, "[2];[?,?]", incompatible_shape_error ? "[d1_0,d0_0]" : "?");
INFER_OK(op, "[2,2];[?,?,?]",
incompatible_shape_error ? "[d1_0,d0_0,d0_1]" : "?");
INFER_OK(op, "[?,1,2,3,4,5];[3,1,?]",
incompatible_shape_error ? "[d0_0,d0_1,d0_2,d0_3|d1_0,d0_4,d0_5]"
: "?");
INFER_OK(op, "[3,1,?];[?,1,2,3,4,5]",
incompatible_shape_error ? "[d1_0,d1_1,d1_2,d1_3|d0_0,d1_4,d1_5]"
: "?");
if (incompatible_shape_error) {
INFER_ERROR("Dimensions must be equal", op, "[2];[3]");
} else {
INFER_OK(op, "[2];[3]", "[]");
}
};
for (string op_name : {"Add", "Complex",
"Div", "Equal",
"Greater", "GreaterEqual",
"Igamma", "Igammac",
"Zeta", "Polygamma",
"Less", "LessEqual",
"LogicalAnd", "LogicalOr",
"Maximum", "Minimum",
"Mod", "Mul",
"NotEqual", "Pow",
"Sub", "SquaredDifference",
"DivNoNan"}) {
ShapeInferenceTestOp op(op_name);
AddNodeAttr("incompatible_shape_error", true, &op.node_def);
test_shapes(op, true);
if ((op_name == "Equal") || (op_name == "NotEqual")) {
ShapeInferenceTestOp op(op_name);
AddNodeAttr("incompatible_shape_error", false, &op.node_def);
test_shapes(op, false);
}
}
}
TEST(MathOpsTest, Select_ShapeFn) {
ShapeInferenceTestOp op("Select");
INFER_OK(op, "?;?;?", "in1|in2");
INFER_OK(op, "[];[1];?", "in1");
INFER_OK(op, "[];?;?", "in1|in2");
INFER_OK(op, "[1];?;?",
"in1|in2");
INFER_OK(op, "[1,2];?;?", "in1|in2?");
INFER_OK(op, "?;[];?", "in1");
INFER_OK(op, "?;?;[]", "in2");
INFER_OK(op, "?;[1];?", "in1");
INFER_OK(op, "?;?;[1]", "in2");
INFER_OK(op, "?;[1,2];?", "in1");
INFER_OK(op, "?;?;[1,2]", "in2");
INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op, "[1];[];?");
INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[];[1];[1,2]");
INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[1,2];[1];?");
INFER_OK(op, "[2];[?];[?]", "in1|in2");
INFER_OK(op, "[?];[?,?,3];[1,2,?]", "[d2_0,d2_1,d1_2]");
INFER_OK(op, "[2];[?,?,3];[?,2,?]", "[d1_0|d2_0,d2_1,d1_2]");
INFER_ERROR("must be equal", op, "[1];[2,?,3];[?,2,?]");
INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op,
"[2,?];[?,?,3];[?,2,?]");
INFER_OK(op, "[2,?,?];[?,?,3];[?,2,?]", "[d0_0,d2_1,d1_2]");
INFER_ERROR("Dimension 2 in both shapes must be equal, but are 3 and 5", op,
"[2,?,5];[?,?,3];[?,2,?]");
const OpRegistrationData* op_reg_data;
TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
typedef std::vector<std::pair<PartialTensorShape, DataType>> ShapeDtypeV;
std::vector<std::unique_ptr<ShapeDtypeV>> handle_data;
std::unique_ptr<shape_inference::InferenceContext> c;
auto run_inference_for_handles = [&]() -> Status {
CHECK(op_reg_data->shape_inference_fn != nullptr);
c.reset(new shape_inference::InferenceContext(
TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def,
{PartialTensorShape(), PartialTensorShape(), PartialTensorShape()}, {},
{}, handle_data));
TF_CHECK_OK(c->construction_status());
Status s = c->Run(op_reg_data->shape_inference_fn);
LOG(INFO) << "Inference got " << s;
return s;
};
auto shape_proto = [](std::initializer_list<int64_t> dim_sizes) {
TensorShapeProto p;
for (auto i : dim_sizes) p.add_dim()->set_size(i);
return p;
};
auto i0 = PartialTensorShape({1, -1});
auto i1 = PartialTensorShape({-1, 2});
PartialTensorShape unknown_shape;
auto scalar = PartialTensorShape({});
handle_data.emplace_back(
new ShapeDtypeV{{scalar, DT_FLOAT}, {unknown_shape, DT_INT32}});
handle_data.emplace_back(new ShapeDtypeV{{i0, DT_FLOAT}, {i1, DT_INT32}});
handle_data.emplace_back(
new ShapeDtypeV{{i1, DT_FLOAT}, {unknown_shape, DT_INT32}});
TF_ASSERT_OK(run_inference_for_handles());
auto* out = c->output_handle_shapes_and_types(0);
ASSERT_EQ(2, out->size());
EXPECT_EQ("[1,2]", c->DebugString(out->at(0).shape));
EXPECT_EQ(DT_FLOAT, out->at(0).dtype);
EXPECT_EQ("[?,2]", c->DebugString(out->at(1).shape));
EXPECT_EQ(DT_INT32, out->at(1).dtype);
handle_data[2]->at(0).first = shape_proto({2, 2});
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"must be equal, but are 1 and 2"));
handle_data[2]->at(0).first = i1;
handle_data[2]->at(1).second = DT_INT64;
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"pointing to different dtypes"));
handle_data[2]->at(1).second = DT_INT32;
handle_data[2]->push_back({i1, DT_FLOAT});
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"pointing to different numbers of tensors"));
handle_data[2]->pop_back();
}
TEST(MathOpsTest, Range_ShapeFn) {
ShapeInferenceTestOp op("Range");
TF_ASSERT_OK(NodeDefBuilder("test", "Range")
.Input({"start", {}, DT_INT32})
.Input({"limit", {}, DT_INT32})
.Input({"delta", {}, DT_INT32})
.Attr("Tidx", DT_INT32)
.Finalize(&op.node_def));
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "[?]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?");
INFER_ERROR("for 'start'", op, "[1,2];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?");
INFER_ERROR("for 'limit'", op, "?;[1,2];?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("for 'delta'", op, "?;?;[1,2]");
Tensor start_t = test::AsScalar(1);
op.input_tensors[0] = &start_t;
INFER_OK(op, "?;?;?", "[?]");
Tensor limit_t = test::AsScalar(1);
op.input_tensors[1] = &limit_t;
INFER_OK(op, "?;?;?", "[?]");
Tensor delta_t = test::AsScalar(1);
op.input_tensors[2] = &delta_t;
INFER_OK(op, "?;?;?", "[0]");
delta_t = test::AsScalar(0);
INFER_ERROR("Requires delta != 0", op, "?;?;?");
delta_t = test::AsScalar(3);
limit_t = test::AsScalar(-1);
INFER_ERROR("Requires start <= limit when delta > 0: 1/-1", op, "?;?;?");
delta_t = test::AsScalar(-1);
INFER_OK(op, "?;?;?", "[2]");
limit_t = test::AsScalar(4);
INFER_ERROR("Requires start >= limit when delta < 0: 1/4", op, "?;?;?");
limit_t = test::AsScalar(100);
start_t = test::AsScalar(2);
delta_t = test::AsScalar(3);
INFER_OK(op, "?;?;?", "[33]");
}
TEST(MathOpsTest, LinSpace_ShapeFn) {
ShapeInferenceTestOp op("LinSpace");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "[?]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?");
INFER_ERROR("for 'start'", op, "[1,2];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?");
INFER_ERROR("for 'stop'", op, "?;[1,2];?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("for 'num'", op, "?;?;[1,2]");
Tensor num_t = test::AsScalar(1);
op.input_tensors[2] = &num_t;
INFER_OK(op, "?;?;?", "[1]");
num_t = test::AsScalar(2);
INFER_OK(op, "?;?;?", "[2]");
num_t = test::AsScalar(-1);
INFER_ERROR("Requires num > 0: -1", op, "?;?;?");
}
TEST(MathOpsTest, UnsortedSegmentSum_ShapeFn) {
ShapeInferenceTestOp op("UnsortedSegmentSum");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "?;[?];?", "?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("Dimensions must be equal, but are 2 and 3", op,
"[1,?,2];[1,?,3];?");
INFER_OK(op, "?;[3];?", "?");
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op,
"[1,2];[1,2,3];?");
Tensor num_segments_t = test::AsScalar(100);
op.input_tensors[2] = &num_segments_t;
INFER_OK(op, "[?,2,3,?,5];[1,2,?];[]", "[100,d0_3,d0_4]");
num_segments_t = test::AsScalar(-1);
INFER_ERROR(("Dimension size, given by scalar input 2, must be "
"non-negative but is -1"),
op, "[3];[3];?");
}
TEST(MathOpsTest, SparseSegment_ShapeFn) {
ShapeInferenceTestOp op("SparseSegmentSum");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[2,4,3];[3];[3]", "[?,d0_1,d0_2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2,4,3];[];[3]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,4,3];[3];[3,4]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 3 and 4", op,
"[2,4,3];[3];[4]");
}
TEST(MathOpsTest, SparseSegmentGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseSegmentMeanGrad");
op.input_tensors.resize(4);
INFER_OK(op, "?;?;?;?", "?");
INFER_OK(op, "[2,4,3];[3];[3];[]", "[?,d0_1,d0_2]");
Tensor num_segments_t = test::AsScalar(100);
op.input_tensors[3] = &num_segments_t;
INFER_OK(op, "[2,4,3];[3];[3];[]", "[100,d0_1,d0_2]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op,
"[2,4,3];[3];[3];[1,1]");
num_segments_t = test::AsScalar(-100);
op.input_tensors[3] = &num_segments_t;
INFER_ERROR("Cannot specify a negative value", op, "[2,4,3];[3];[3];[]");
}
TEST(MathOpsTest, BatchMatMul_ShapeFn) {
ShapeInferenceTestOp op("BatchMatMul");
auto set_adj = [&op](bool adj_x, bool adj_y) {
TF_ASSERT_OK(NodeDefBuilder("test", "BatchMatMul")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(&op.node_def));
};
set_adj(false, false);
INFER_ERROR("at least rank 2", op, "[1];?");
INFER_ERROR("at least rank 2", op, "?;[2]");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?,?,?];?", "[d0_0,d0_1,d0_2,?]");
set_adj(false, false);
INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,3,1]");
set_adj(true, false);
INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_3,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,3,1]");
set_adj(false, true);
INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_2,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,1,3]");
set_adj(true, true);
INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_3,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,1,3]");
}
TEST(MathOpsTest, ArgOps_ShapeFn) {
ShapeInferenceTestOp op("ArgMax");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[2];?", "[]");
INFER_OK(op, "[];?", "[]");
INFER_ERROR("must be rank 0", op, "[2];[1]");
INFER_OK(op, "[2,3,4];?", "[?,?]");
INFER_OK(op, "[2,3,4,5,6];?", "[?,?,?,?]");
Tensor dimension = test::AsScalar(0);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_1,d0_2]");
dimension = test::AsScalar(1);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_2]");
dimension = test::AsScalar(2);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]");
dimension = test::AsScalar(10);
op.input_tensors[1] = &dimension;
INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
dimension = test::AsScalar(-10);
op.input_tensors[1] = &dimension;
INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
dimension = test::AsScalar(-1);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]");
}
TEST(MathOpsTest, Betainc_ShapeFn) {
ShapeInferenceTestOp op("Betainc");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[?,?];?;?", "in0");
INFER_OK(op, "[?,2];?;[1,?]", "[d2_0,d0_1]");
INFER_OK(op, "[?,2,?];[1,?,?];[?,?,3]", "[d1_0,d0_1,d2_2]");
INFER_OK(op, "[?,2,?];[];[?,?,3]", "[d0_0|d2_0,d0_1,d2_2]");
INFER_OK(op, "[];[];[?,?,3]", "in2");
INFER_OK(op, "[];[];?", "in2");
INFER_OK(op, "[];[];[1,2,3,4]", "in2");
INFER_OK(op, "[];[];[]", "in0");
INFER_ERROR("must be equal", op, "[1,2];[];[1,4]");
INFER_ERROR("must be equal", op, "[1,2];[];[1,2,3]");
}
TEST(MathOpsTest, Requantize_ShapeFn) {
ShapeInferenceTestOp op("Requantize");
INFER_OK(op, "?;?;?;?;?", "in0;[];[]");
INFER_OK(op, "?;[];[];[];[]", "in0;[];[]");
INFER_ERROR("must be rank 0", op, "?;[1];?;?;?");
INFER_ERROR("must be rank 0", op, "?;?;[2];?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;[3];?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;[4]");
}
TEST(MathOpsTest, RequantizationRange_ShapeFn) {
ShapeInferenceTestOp op("RequantizationRange");
INFER_OK(op, "?;?;?", "[];[]");
INFER_OK(op, "?;[];[]", "[];[]");
INFER_ERROR("must be rank 0", op, "?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;[2]");
}
TEST(MathOpsTest, Cross_ShapeFn) {
ShapeInferenceTestOp op("Cross");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but", op, "[3];[5]");
INFER_ERROR("Dimension must be 3 but", op, "[3,5];[3,5]");
INFER_OK(op, "?;?", "in0");
INFER_OK(op, "[?];[?]", "in0");
INFER_OK(op, "[1,?,3];[?,?,?]", "in0");
}
TEST(MathOpsTest, HistogramFixedWidth_ShapeFn) {
ShapeInferenceTestOp op("HistogramFixedWidth");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[2];[2]");
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "[?];[2];[]", "[?]");
INFER_OK(op, "[?];[2];?", "[?]");
}
TEST(MathOpsTest, QuantizedAdd_ShapeFn) {
ShapeInferenceTestOp op("QuantizedAdd");
INFER_OK(op, "?;?;?;?;?;?", "?;[];[]");
INFER_OK(op, "?;?;[];[];[];[]", "?;[];[]");
INFER_OK(op, "[1,2];?;[];[];[];[]", "?;[];[]");
INFER_OK(op, "[];[2];[];[];[];[]", "[d1_0];[];[]");
INFER_ERROR("must be rank 0", op, "?;?;[1];?;?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;[2];?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;[3];?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;?;[4]");
}
TEST(MathOpsTest, Bincount_ShapeFn) {
ShapeInferenceTestOp op("Bincount");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1];?");
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "?;[];?", "[?]");
INFER_OK(op, "[?];[];?", "[?]");
INFER_OK(op, "[?];[];[?]", "[?]");
}
TEST(MathOpsTest, SobolSample) {
ShapeInferenceTestOp op("SobolSample");
INFER_ERROR("must be rank 0", op, "[1];?;?");
INFER_ERROR("must be rank 0", op, "?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;[1]");
INFER_OK(op, "[];[];[]", "[?,?]");
}
TEST(MathOpsTest, EqualOp) {
ShapeInferenceTestOp op("Equal");
AddNodeAttr("incompatible_shape_error", true, &op.node_def);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,2];?", "?");
INFER_OK(op, "?;[1,2]", "?");
INFER_OK(op, "[1,2,3];[1]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,1];[1,3]", "[d0_0,d0_1,d1_1]");
INFER_OK(op, "[1,?,3];[3,1]", "[d0_0,d1_0,d0_2]");
INFER_OK(op, "[1,2,3];[2,1,3]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,10,1];[?,1,4]", "[?,d0_1,d1_2]");
INFER_OK(op, "[10,?,1];[1,?,4]", "[d0_0,?,d1_2]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/math_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/math_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c947f36-5411-42e9-ae6b-7264dbade642 | cpp | tensorflow/tensorflow | ir_array | third_party/xla/xla/service/llvm_ir/ir_array.cc | third_party/xla/xla/service/llvm_ir/ir_array_test.cc | #include "xla/service/llvm_ir/ir_array.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace llvm_ir {
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
llvm::Value* linear, const Shape& shape,
llvm::Type* index_type)
: Index(multidim, shape, index_type) {
CHECK_NE(linear, nullptr);
linear_ = linear;
}
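// Recovers the per-dimension indices from a linear index by walking the
// layout from the minor-most to the major-most dimension, dividing out the
// sizes of the dimensions already consumed.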
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b) const {
int64_t divisor = 1;
const Layout& layout = shape.layout();
for (int64_t i = 0; i < layout.minor_to_major_size(); ++i) {
int64_t dimension = layout.minor_to_major(i);
int64_t size_of_current_dimension = shape.dimensions(dimension);
auto* quot = b->CreateUDiv(linear, GetConstantWithIndexType(divisor));
if (i < layout.minor_to_major_size() - 1) {
(*multidim)[dimension] = b->CreateURem(
quot, GetConstantWithIndexType(size_of_current_dimension));
} else {
(*multidim)[dimension] = quot;
}
divisor *= size_of_current_dimension;
}
}
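// Same as above, but the dimension sizes are runtime values (dynamic shapes),
// so the running divisor is built with IR multiplications.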
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b) const {
CHECK_EQ(shape.dimensions_size(), dynamic_dims.size());
CHECK_EQ(multidim_.size(), shape.rank());
llvm::Value* divisor = GetConstantWithIndexType(1);
const Layout& layout = shape.layout();
for (int64_t i = 0; i < layout.minor_to_major_size(); ++i) {
int64_t dimension = layout.minor_to_major(i);
auto* quot = b->CreateUDiv(linear, divisor, "quot");
if (i < layout.minor_to_major_size() - 1) {
      llvm::Value* casted_dynamic_dim = b->CreateIntCast(
          dynamic_dims[dimension], quot->getType(), /*isSigned=*/true);
(*multidim)[dimension] =
b->CreateURem(quot, casted_dynamic_dim, "dim_value");
divisor = b->CreateMul(divisor, casted_dynamic_dim, "divisor");
} else {
(*multidim)[dimension] = quot;
}
}
}
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
}
IrArray::Index::Index(llvm::Value* linear,
absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK_EQ(multidim.size(), shape.rank());
for (auto dim : multidim) {
if (dim) {
CHECK_EQ(dim->getType(), index_type_);
}
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
for (int i = 0; i < multidim.size(); ++i) {
if (multidim[i] != nullptr) {
multidim_[i] = multidim[i];
}
}
}
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, dynamic_dims, b);
}
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
absl::Span<int64_t const> dimensions,
llvm::Type* index_type)
    : Index(multidim, ShapeUtil::MakeShape(/*element_type=*/PRED, dimensions),
            index_type) {}
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::Type* index_type)
: multidim_(multidim.begin(), multidim.end()),
linear_(nullptr),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()),
index_type_(index_type) {
CHECK_NE(index_type_, nullptr);
CHECK_EQ(shape.dimensions_size(), multidim.size());
for (const auto* dim : multidim) {
CHECK_NE(dim, nullptr);
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
}
IrArray::IrArray(llvm::Value* base_ptr, llvm::Type* pointee_type, Shape shape)
: base_ptr_(base_ptr),
pointee_type_(pointee_type),
shape_(std::move(shape)) {
  // `shape` was moved into `shape_` above; validate the member.
  TF_CHECK_OK(ShapeUtil::ValidateShape(shape_));
CHECK(base_ptr_->getType()->isPointerTy());
int depth = 0;
element_type_ = pointee_type;
while (llvm::ArrayType* array_type =
llvm::dyn_cast<llvm::ArrayType>(element_type_)) {
element_type_ = array_type->getElementType();
++depth;
}
if (!shape_.IsArray() || ShapeUtil::IsScalar(shape_)) {
DCHECK(depth == 1 || depth == 0) << depth;
} else {
    DCHECK_EQ(depth, shape_.rank()) << shape_.ShortDebugString();
}
}
bool IrArray::Index::LinearValidOnShape(const Shape& a) const {
auto b = ShapeUtil::MakeShape(a.element_type(), dims_);
*b.mutable_layout() = layout_;
return linear_ != nullptr &&
ShapeUtil::ElementsIn(a) == ShapeUtil::ElementsIn(b) &&
ShapeUtil::ReshapeIsBitcast(a, b);
}
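// Maps an index into the reshape result back to an index into the reshape
// operand. Reshapes that only insert or delete size-1 dimensions reuse the
// existing indices; otherwise each group of common factors between the two
// shapes is linearized and then delinearized into the operand's dimensions.
// The linear index is preserved when the reshape is a bitcast.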
IrArray::Index IrArray::Index::SourceIndexOfReshape(
const Shape& output_shape, const Shape& input_shape,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(multidim_.size(), output_shape.rank());
std::vector<llvm::Value*> source_multidim_index(
input_shape.rank(), llvm::UndefValue::get(index_type_));
if (std::optional<ShapeUtil::ShapeEqualityDescriptor> trivial_reshape =
ShapeUtil::InsertedOrDeleted1SizedDimensions(input_shape,
output_shape)) {
for (int64_t i = 0, j = 0, k = 0, l = 0; i < source_multidim_index.size();
++i) {
if (j == trivial_reshape->deleted_dimensions.size() ||
trivial_reshape->deleted_dimensions[j] > i) {
while (l < trivial_reshape->inserted_dimensions.size() &&
trivial_reshape->inserted_dimensions[l] == k) {
++k;
++l;
}
source_multidim_index[i] = multidim_[k];
++k;
} else {
source_multidim_index[i] = GetConstantWithIndexType(0);
++j;
}
}
} else {
const auto common_factors =
CommonFactors(input_shape.dimensions(), output_shape.dimensions());
for (ssize_t k = common_factors.size() - 2; k >= 0; --k) {
absl::Span<int64_t const> dimensions = output_shape.dimensions().subspan(
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second);
llvm::Value* logical_linear_index =
Index(absl::Span<llvm::Value* const>(multidim_).subspan(
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second),
dimensions, index_type_)
.Linearize(dimensions, builder);
for (int64_t i = common_factors[k + 1].first - 1;
i >= common_factors[k].first; --i) {
llvm::Value* divisor =
GetConstantWithIndexType(input_shape.dimensions(i));
if (input_shape.dimensions(i) == 1) {
source_multidim_index[i] = GetConstantWithIndexType(0);
} else if (i == common_factors[k].first) {
source_multidim_index[i] = logical_linear_index;
} else {
source_multidim_index[i] =
builder->CreateURem(logical_linear_index, divisor);
}
logical_linear_index =
builder->CreateUDiv(logical_linear_index, divisor);
}
}
}
if (linear() != nullptr && LayoutUtil::HasLayout(input_shape) &&
LayoutUtil::HasLayout(output_shape) &&
ShapeUtil::ReshapeIsBitcast(input_shape, output_shape)) {
return Index(source_multidim_index, linear(), input_shape, index_type_);
}
return Index(source_multidim_index, input_shape, index_type_);
}
IrArray::Index IrArray::Index::SourceIndexOfSlice(
const Shape& operand_shape, absl::Span<const int64_t> starts,
absl::Span<const int64_t> strides, llvm::IRBuilder<>* builder) const {
std::vector<llvm::Value*> source_multi_index(multidim_.size());
for (int i = 0; i < multidim_.size(); ++i) {
int64_t stride = strides[i];
if (stride != 1) {
source_multi_index[i] = builder->CreateAdd(
builder->CreateMul(multidim_[i], GetConstantWithIndexType(stride)),
GetConstantWithIndexType(starts[i]));
} else {
source_multi_index[i] =
builder->CreateAdd(multidim_[i], GetConstantWithIndexType(starts[i]));
}
}
return Index(source_multi_index, operand_shape, index_type_);
}
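// Permutes the result index back into the operand's dimension order via the
// inverse permutation, keeping the linear index when the transpose is a
// bitcast.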
IrArray::Index IrArray::Index::SourceIndexOfTranspose(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping) const {
std::vector<llvm::Value*> operand_multidim_index =
PermuteInverse(multidim(), dimension_mapping);
if (linear() != nullptr && LayoutUtil::HasLayout(operand_shape) &&
LayoutUtil::HasLayout(shape) &&
ShapeUtil::TransposeIsBitcast(operand_shape, shape, dimension_mapping)) {
return Index(operand_multidim_index, linear(), operand_shape, index_type_);
}
return Index(operand_multidim_index, operand_shape, index_type_);
}
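// Handles a bitcast as a pure reshape, a pure transpose, or a
// transpose-reshape-transpose decomposition, applying the corresponding index
// transformations in reverse order.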
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
const Shape& shape, const Shape& operand_shape,
llvm::IRBuilder<>* builder) const {
CHECK(LayoutUtil::HasLayout(shape) && LayoutUtil::HasLayout(operand_shape));
const ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(operand_shape, shape);
if (std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition)) {
return SourceIndexOfReshape(shape, operand_shape, builder);
}
if (std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
decomposition)) {
const auto& decomposition_transpose =
std::get<ShapeUtil::BitcastDecompositionTranspose>(decomposition);
return SourceIndexOfTranspose(shape, operand_shape,
decomposition_transpose.transpose_dims);
}
CHECK(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
const auto& decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
Index index = *this;
if (!decomposition_trt.IsTranspose2Identity()) {
index = index.SourceIndexOfTranspose(shape, decomposition_trt.reshape_shape,
decomposition_trt.transpose2_dims);
}
index =
index.SourceIndexOfReshape(decomposition_trt.reshape_shape,
decomposition_trt.transpose1_shape, builder);
if (!decomposition_trt.IsTranspose1Identity()) {
index = index.SourceIndexOfTranspose(decomposition_trt.transpose1_shape,
operand_shape,
decomposition_trt.transpose1_dims);
}
return index;
}
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
const Shape& operand_shape, llvm::IRBuilder<>* builder) const {
auto shape = ShapeUtil::MakeShape(F32, dims_);
*shape.mutable_layout() = layout_;
return SourceIndexOfBitcast(shape, operand_shape, builder);
}
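// Maps an index into the broadcast result to an index into the operand by
// selecting the broadcast dimensions. If the broadcast dimensions are
// physically contiguous and the layouts agree, an operand linear index is
// also derived from the output linear index with a division and a remainder.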
IrArray::Index IrArray::Index::SourceIndexOfBroadcast(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping,
llvm::IRBuilder<>* builder) const {
int64_t rank = operand_shape.rank();
std::vector<llvm::Value*> source_index(rank);
for (int64_t i = 0; i < rank; ++i) {
source_index[i] = multidim_[dimension_mapping[i]];
}
if (linear_ == nullptr || !LayoutUtil::HasLayout(operand_shape) ||
!LayoutUtil::HasLayout(shape) || rank == 1) {
return Index(source_index, operand_shape, index_type_);
}
std::vector<int64_t> logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(shape.layout());
int64_t output_rank = shape.rank();
int64_t min_broadcasted_dimension = output_rank;
int64_t max_broadcasted_dimension = -1;
for (int64_t i = 0; i < rank; ++i) {
int64_t physical_dim = logical_to_physical[dimension_mapping[i]];
min_broadcasted_dimension =
std::min(min_broadcasted_dimension, physical_dim);
max_broadcasted_dimension =
std::max(max_broadcasted_dimension, physical_dim);
}
bool contiguous_broadcast_dimensions =
max_broadcasted_dimension - min_broadcasted_dimension == rank - 1;
if (!contiguous_broadcast_dimensions) {
return Index(source_index, operand_shape, index_type_);
}
std::vector<int64_t> operand_logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(operand_shape.layout());
for (int64_t i = 0; i < rank; ++i) {
if (operand_logical_to_physical[i] !=
logical_to_physical[dimension_mapping[i]] - min_broadcasted_dimension) {
return Index(source_index, operand_shape, index_type_);
}
}
llvm::Value* linear = linear_;
int64_t divisor = 1;
for (int64_t i = max_broadcasted_dimension + 1; i < output_rank; ++i) {
divisor *= shape.dimensions(LayoutUtil::Major(shape.layout(), i));
}
if (divisor > 1) {
linear = builder->CreateUDiv(linear, GetConstantWithIndexType(divisor));
}
if (min_broadcasted_dimension > 0) {
int64_t mod = 1;
for (int64_t i = min_broadcasted_dimension; i <= max_broadcasted_dimension;
++i) {
mod *= shape.dimensions(LayoutUtil::Major(shape.layout(), i));
}
linear = builder->CreateURem(linear, GetConstantWithIndexType(mod));
}
return Index(source_index, linear, operand_shape, index_type_);
}
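// Folds the multidimensional index into a linear index by summing each
// component scaled by the product of the sizes of the more-minor dimensions.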
llvm::Value* IrArray::Index::Linearize(absl::Span<const int64_t> dimensions,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(size(), dimensions.size());
llvm::Value* logical_linear_index = GetConstantWithIndexType(0);
int64_t multiplier = 1;
for (ssize_t i = 0; i < size(); ++i) {
int64_t dimension = layout_.minor_to_major(i);
    llvm::Value* addend = builder->CreateMul(
        (*this)[dimension], GetConstantWithIndexType(multiplier), "",
        /*HasNUW=*/true, /*HasNSW=*/true);
    addend = builder->CreateZExtOrTrunc(addend, index_type_);
    logical_linear_index = builder->CreateAdd(logical_linear_index, addend, "",
                                              /*HasNUW=*/true, /*HasNSW=*/true);
multiplier *= dimensions[dimension];
}
return logical_linear_index;
}
llvm::Value* IrArray::Index::Linearize(
const std::vector<llvm::Value*>& dynamic_dims,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(size(), dynamic_dims.size());
llvm::Value* logical_linear_index = GetConstantWithIndexType(0);
llvm::Value* multiplier = GetConstantWithIndexType(1);
for (ssize_t i = 0; i < size(); ++i) {
int64_t dimension = layout_.minor_to_major(i);
    llvm::Value* addend = builder->CreateMul((*this)[dimension], multiplier, "",
                                             /*HasNUW=*/true, /*HasNSW=*/true);
    addend = builder->CreateZExtOrTrunc(addend, index_type_);
    logical_linear_index = builder->CreateAdd(logical_linear_index, addend, "",
                                              /*HasNUW=*/true, /*HasNSW=*/true);
if (i < size() - 1) {
multiplier = builder->CreateMul(multiplier, dynamic_dims[dimension],
"multiplier");
}
}
return logical_linear_index;
}
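// Emits the address of the element at `index`, using the linear index when it
// is valid for this shape. For sub-byte element types (e.g. S4) the returned
// pointer is byte-aligned and `bit_offset` receives the offset within that
// byte.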
llvm::Value* IrArray::EmitArrayElementAddress(const IrArray::Index& index,
llvm::IRBuilder<>* b,
absl::string_view name,
bool use_linear_index,
llvm::Value** bit_offset) const {
if (ShapeUtil::IsScalar(shape_)) {
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
CHECK_NE(bit_offset, nullptr);
*bit_offset =
b->getInt8(8 - primitive_util::BitWidth(shape_.element_type()));
}
return base_ptr_;
}
CHECK_EQ(index.size(), shape_.rank());
CHECK(index.ShapeIsCompatible(shape_))
<< "Shape " << index.AsShapeWithType(shape_.element_type()).ToString(true)
<< " is not compatible with " << shape_.ToString(true);
if (use_linear_index && index.LinearValidOnShape(shape_)) {
return EmitLinearArrayElementAddress(index, b, name, bit_offset);
}
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
IrArray::Index linear_index = index;
if (!index.LinearValidOnShape(shape_)) {
std::vector<int64_t> dimensions;
dimensions.reserve(shape_.rank());
for (int64_t i = 0; i < shape_.rank(); ++i) {
dimensions.push_back(shape_.dimensions(i));
}
llvm::Value* linearized = index.Linearize(dimensions, b);
linear_index = IrArray::Index(linearized, shape_, b);
}
return EmitLinearArrayElementAddress(linear_index, b, name, bit_offset);
}
std::vector<llvm::Value*> actual_index;
for (int64_t i = 0; i < index.size(); ++i) {
auto dim = shape_.dimensions(i);
actual_index.push_back(
dim == 1 ? llvm::ConstantInt::get(index[i]->getType(), 0) : index[i]);
}
CHECK_GT(index.size(), 0);
std::vector<llvm::Value*> gep_indices(
1, llvm::ConstantInt::get(index[0]->getType(), 0));
for (int64_t i = 0; i < shape_.rank(); ++i) {
int64_t dimension = LayoutUtil::Major(shape_.layout(), i);
gep_indices.push_back(actual_index[dimension]);
}
return b->CreateInBoundsGEP(pointee_type_, base_ptr_, gep_indices,
llvm_ir::AsStringRef(name));
}
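// Emits a GEP from the linear index (split into two GEPs when the index is an
// add). For sub-byte element types the linear index is converted into a byte
// offset plus a bit offset within that byte.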
llvm::Value* IrArray::EmitLinearArrayElementAddress(
const IrArray::Index& index, llvm::IRBuilder<>* b, absl::string_view name,
llvm::Value** bit_offset) const {
CHECK(index.LinearValidOnShape(shape_));
llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
llvm::Type* type = PrimitiveTypeToIrType(shape_.element_type(), module);
if (!primitive_util::IsSubByteNonPredType(shape_.element_type())) {
auto linear_index = llvm::dyn_cast<llvm::BinaryOperator>(index.linear());
if (linear_index && (linear_index->getOpcode() == llvm::Instruction::Add)) {
llvm::Value* index_operand_0 = linear_index->getOperand(0);
llvm::Value* index_operand_1 = linear_index->getOperand(1);
llvm::Value* ptr_address =
b->CreateGEP(type, base_ptr_, index_operand_0, "");
return b->CreateInBoundsGEP(type, ptr_address, index_operand_1,
llvm_ir::AsStringRef(name));
} else {
return b->CreateInBoundsGEP(type, base_ptr_, index.linear(),
llvm_ir::AsStringRef(name));
}
}
llvm::Type* index_type = index.linear()->getType();
auto bit_width = primitive_util::BitWidth(shape_.element_type());
llvm::Value* elements_per_byte =
llvm::ConstantInt::get(index_type, 8 / bit_width);
llvm::Value* remainder = b->CreateURem(index.linear(), elements_per_byte);
llvm::Value* byte_offset = b->CreateUDiv(index.linear(), elements_per_byte);
CHECK_NE(bit_offset, nullptr);
*bit_offset = b->CreateIntCast(
b->CreateSub(llvm::ConstantInt::get(index_type, 8 - bit_width),
b->CreateMul(remainder,
llvm::ConstantInt::get(index_type, bit_width))),
b->getInt8Ty(), false);
return b->CreateInBoundsGEP(b->getInt8Ty(), base_ptr_, byte_offset,
llvm_ir::AsStringRef(name));
}
void IrArray::AnnotateLoadStoreInstructionWithMetadata(
llvm::Instruction* instruction) const {
CHECK(llvm::isa<llvm::LoadInst>(instruction) ||
llvm::isa<llvm::StoreInst>(instruction));
CHECK(!llvm::isa<llvm::StoreInst>(instruction) || !is_invariant_)
<< "Trying to create a store to an invariant IRArray.";
for (const auto& kind_md_pair : metadata_) {
instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);
}
}
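// Loads the element at `index`; for sub-byte element types the containing
// byte is loaded, shifted, and truncated to the element's bit width.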
llvm::Value* IrArray::EmitReadArrayElement(const Index& index,
llvm::IRBuilder<>* b,
absl::string_view name,
bool use_linear_index) const {
llvm::Value* bit_offset = nullptr;
llvm::Value* element_address =
EmitArrayElementAddress(index, b, name, use_linear_index, &bit_offset);
llvm::Type* load_type =
primitive_util::IsSubByteNonPredType(shape_.element_type())
? b->getInt8Ty()
: element_type_;
llvm::LoadInst* load =
b->CreateLoad(load_type, element_address, llvm_ir::AsStringRef(name));
AnnotateLoadStoreInstructionWithMetadata(load);
llvm::Value* elem = load;
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
llvm::Value* shifted = b->CreateLShr(load, bit_offset);
elem = b->CreateTrunc(
shifted, b->getIntNTy(primitive_util::BitWidth(shape_.element_type())));
}
return elem;
}
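// Stores `value` at `index`; for sub-byte element types this is a
// read-modify-write of the containing byte using a mask.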
void IrArray::EmitWriteArrayElement(const Index& index, llvm::Value* value,
llvm::IRBuilder<>* b,
bool use_linear_index) const {
llvm::Value* bit_offset = nullptr;
llvm::Value* element_address =
EmitArrayElementAddress(index, b, "", use_linear_index, &bit_offset);
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
llvm::LoadInst* load = b->CreateLoad(b->getInt8Ty(), element_address);
AnnotateLoadStoreInstructionWithMetadata(load);
    value = b->CreateIntCast(value, b->getInt8Ty(), /*isSigned=*/false);
value = b->CreateShl(value, bit_offset);
auto bit_width = primitive_util::BitWidth(shape_.element_type());
llvm::Value* mask = b->getInt8(~LsbMask<uint8_t>(bit_width));
mask = b->CreateIntrinsic(b->getInt8Ty(), llvm::Intrinsic::fshl,
{mask, mask, bit_offset});
llvm::Value* masked_load = b->CreateAnd(load, mask);
value = b->CreateOr(masked_load, value);
}
llvm::StoreInst* store = b->CreateStore(value, element_address);
AnnotateLoadStoreInstructionWithMetadata(store);
}
IrArray IrArray::CastToShape(const Shape& new_shape,
llvm::IRBuilder<>* b) const {
if (shape_ == new_shape) return *this;
llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
llvm::Type* new_ir_type = llvm_ir::ShapeToIrType(new_shape, module);
IrArray new_irarray(base_ptr_, new_ir_type, new_shape);
new_irarray.metadata_ = metadata_;
return new_irarray;
}
bool IrArray::Index::ShapeIsCompatible(const Shape& a, const Shape& b) {
const auto get_strides = [](const Shape& shape) {
int rank = shape.dimensions().size();
int64_t stride = 1;
std::vector<int64_t> strides;
for (int i = 0; i < rank; i++) {
auto dim = shape.dimensions(shape.layout().minor_to_major(i));
if (dim != 1) {
stride *= dim;
strides.push_back(stride);
}
}
return strides;
};
return get_strides(a) == get_strides(b);
}
}
} | #include "xla/service/llvm_ir/ir_array.h"
#include <string>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace llvm_ir {
namespace {
class IrArrayTest : public ::testing::Test {
public:
IrArrayTest()
: context_{},
module_{"IrArrayTest module", context_},
builder_{context_} {}
llvm::Function* EmitFunctionAndSetInsertPoint(
llvm::ArrayRef<llvm::Type*> params) {
    llvm::FunctionType* function_type = llvm::FunctionType::get(
        llvm::Type::getVoidTy(context_), params, /*isVarArg=*/false);
llvm::Function* function = llvm::Function::Create(
function_type, llvm::Function::LinkageTypes::ExternalLinkage,
"test_function", module_);
llvm::BasicBlock* bb = llvm::BasicBlock::Create(context_, "bb", function);
builder_.SetInsertPoint(bb);
return function;
}
protected:
llvm::LLVMContext context_;
llvm::Module module_;
llvm::IRBuilder<> builder_;
};
TEST_F(IrArrayTest, TestShapeIsCompatible) {
xla::Shape a =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 1, 0});
xla::Shape b =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 0, 1});
xla::Shape c =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 20}, {2, 1, 0});
xla::Shape d =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 1, 0});
xla::Shape e =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 0, 1});
xla::Shape f =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 30}, {2, 1, 0});
EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(a, b));
EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(a, c));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, d));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, e));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, f));
}
TEST_F(IrArrayTest, EmitArrayElementAddress) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitArrayElementAddress(index, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: getelementptr inbounds float, ptr %[[ptr]], i32 %[[idx]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressNonLinear) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
  ir_array.EmitArrayElementAddress(index, &builder_, /*name=*/"",
                                   /*use_linear_index=*/false);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: %[[udiv1:[0-9]+]] = udiv i32 %[[idx]], 1
CHECK: %[[urem:[0-9]+]] = urem i32 %[[udiv1]], 5
CHECK: %[[udiv2:[0-9]+]] = udiv i32 %[[idx]], 5
CHECK: getelementptr inbounds [3 x [5 x float]], ptr %0, i32 0, i32 %[[udiv2]], i32 %[[urem]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
llvm::Value* bit_offset;
  ir_array.EmitArrayElementAddress(index, &builder_, /*name=*/"",
                                   /*use_linear_index=*/true, &bit_offset);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: %[[rem:[0-9]+]] = urem i32 %[[idx]], 2
CHECK: %[[div:[0-9]+]] = udiv i32 %[[idx]], 2
CHECK: getelementptr inbounds i8, ptr %[[ptr]], i32 %[[div]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressInt4NonLinear) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{llvm::PointerType::get(context_, 0), llvm::Type::getInt32Ty(context_),
llvm::Type::getInt32Ty(context_)});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index0 = function->getArg(1);
llvm::Argument* array_index1 = function->getArg(2);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index({array_index0, array_index1}, shape,
builder_.getInt32Ty());
llvm::Value* bit_offset;
  ir_array.EmitArrayElementAddress(index, &builder_, /*name=*/"",
                                   /*use_linear_index=*/false, &bit_offset);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i32 %[[idx1:[0-9]+]]) {
CHECK: %[[mul1:[0-9]+]] = mul nuw nsw i32 %[[idx1]], 1
CHECK: %[[add1:[0-9]+]] = add nuw nsw i32 0, %[[mul1]]
CHECK: %[[mul2:[0-9]+]] = mul nuw nsw i32 %[[idx0]], 5
CHECK: %[[add2:[0-9]+]] = add nuw nsw i32 %[[add1]], %[[mul2]]
CHECK: %[[udiv:[0-9]+]] = udiv i32 %[[add2]], 2
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[udiv]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitReadArrayElementInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitReadArrayElement(index, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]]) {
COM: Calculate the address.
CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
COM: Load the element, optionally shift, and truncate.
CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
CHECK: %[[shift:[0-9]+]] = lshr i8 %[[load]], %[[trunc]]
CHECK: trunc i8 %[[shift]] to i4
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitWriteArrayElementInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty(), builder_.getIntNTy(4)});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
llvm::Argument* val_to_write = function->getArg(2);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitWriteArrayElement(index, val_to_write, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i4 %[[val:[0-9]+]]) {
COM: Calculate the address.
CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
COM: Load address, replace 4 bits with the value, and write to address.
CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
CHECK: %[[zext:[0-9]+]] = zext i4 %[[val]] to i8
CHECK: %[[shifted_val:[0-9]+]] = shl i8 %[[zext]], %[[trunc]]
CHECK: %[[mask:[0-9]+]] = call i8 @llvm.fshl.i8(i8 -16, i8 -16, i8 %[[trunc]])
CHECK: %[[and:[0-9]+]] = and i8 %[[load]], %[[mask]]
CHECK: %[[towrite:[0-9]+]] = or i8 %[[and]], %[[shifted_val]]
CHECK: store i8 %[[towrite]], ptr %[[gep]], align 1
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/ir_array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/ir_array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e3458b4-6bda-4c3d-b527-832bd9c9dbfd | cpp | tensorflow/tensorflow | spmd_prepare | third_party/xla/xla/service/spmd/spmd_prepare.cc | third_party/xla/xla/service/spmd/spmd_prepare_test.cc | #include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
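// Splits a scatter-add whose indices and updates are two-way concatenations
// into two chained scatters when each half exposes the same sharded
// batch-parallel dimensions that the combined scatter lacks, letting the SPMD
// partitioner exploit them.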
absl::StatusOr<bool> ProcessScatter(HloInstruction* hlo,
const CallGraph& call_graph) {
if (hlo->opcode() != HloOpcode::kScatter) {
return false;
}
HloScatterInstruction* scatter = Cast<HloScatterInstruction>(hlo);
HloComputation* computation = hlo->parent();
if (scatter->scatter_operand_count() > 1) {
return false;
}
HloInstruction* operand = scatter->scatter_operands()[0];
HloInstruction* indices = scatter->scatter_indices();
HloInstruction* updates = scatter->scatter_updates()[0];
if (operand->opcode() != HloOpcode::kAdd ||
indices->opcode() != HloOpcode::kConcatenate ||
indices->operand_count() != 2 ||
updates->opcode() != HloOpcode::kConcatenate ||
updates->operand_count() != 2 ||
!Match(scatter->to_apply()->root_instruction(),
match::AddAnyOrder(match::Parameter(0), match::Parameter(1)))) {
return false;
}
const auto& dnums = scatter->scatter_dimension_numbers();
auto get_parallel_dims_for_scatter = [&dnums, &call_graph](
const HloInstruction* operand,
const HloInstruction* indices,
const HloInstruction* updates) {
std::vector<int64_t> slice_sizes = hlo_sharding_util::GetScatterSliceSize(
operand->shape(), updates->shape(), dnums);
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& index_map = dnums.scatter_dims_to_operand_dims();
return hlo_sharding_util::GetGatherScatterBatchParallelDims(
operand, indices, slice_sizes, index_vector_dim, index_map, call_graph);
};
if (get_parallel_dims_for_scatter(operand, indices, updates).has_value()) {
return false;
}
HloInstruction* lhs_indices = indices->mutable_operand(0);
HloInstruction* rhs_indices = indices->mutable_operand(1);
HloInstruction* lhs_updates = updates->mutable_operand(0);
HloInstruction* rhs_updates = updates->mutable_operand(1);
std::optional<hlo_sharding_util::GatherScatterParallelDims> lhs_parallel_dims;
std::optional<hlo_sharding_util::GatherScatterParallelDims> rhs_parallel_dims;
lhs_parallel_dims =
get_parallel_dims_for_scatter(operand, lhs_indices, lhs_updates);
if (!lhs_parallel_dims.has_value()) {
return false;
}
rhs_parallel_dims =
get_parallel_dims_for_scatter(operand, rhs_indices, rhs_updates);
if (!rhs_parallel_dims.has_value()) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims !=
rhs_parallel_dims->operand_parallel_dims ||
lhs_parallel_dims->indices_parallel_dims !=
rhs_parallel_dims->indices_parallel_dims) {
return false;
}
if (lhs_parallel_dims->operand_parallel_dims.size() !=
lhs_parallel_dims->indices_parallel_dims.size()) {
return false;
}
HloInstruction* lhs_operand = operand->mutable_operand(0);
HloInstruction* rhs_operand = operand->mutable_operand(1);
bool any_sharded_parallel_dim = false;
if (!lhs_operand->has_sharding() || !rhs_operand->has_sharding() ||
!lhs_indices->has_sharding() || !rhs_indices->has_sharding()) {
return false;
}
for (int i = 0; i < lhs_parallel_dims->operand_parallel_dims.size(); ++i) {
if (lhs_operand->sharding().IsTiled() &&
lhs_operand->sharding().tile_assignment().dim(
lhs_parallel_dims->operand_parallel_dims[i]) != 1 &&
lhs_indices->sharding().tile_assignment().dim(
lhs_parallel_dims->indices_parallel_dims[i]) != 1) {
any_sharded_parallel_dim = true;
break;
}
}
if (!any_sharded_parallel_dim) {
return false;
}
HloInstruction* scatter0 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), operand, lhs_indices, lhs_updates,
scatter->to_apply(), dnums, false, false));
scatter0->set_metadata(scatter->metadata());
scatter0->set_sharding(scatter->sharding());
HloInstruction* scatter1 =
computation->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), scatter0, rhs_indices, rhs_updates,
scatter->to_apply(), dnums, false, false));
scatter1->set_metadata(scatter->metadata());
scatter1->set_sharding(scatter->sharding());
TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(scatter1));
return true;
}
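// Applies the scatter rewrite to every sharded instruction in `computation`.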
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
const CallGraph& call_graph) {
bool changed = false;
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (!hlo->has_sharding()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool scatter_changed, ProcessScatter(hlo, call_graph));
if (scatter_changed) {
changed = true;
continue;
}
}
return changed;
}
}
absl::StatusOr<bool> SpmdPrepare::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (auto comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp, *call_graph));
changed |= comp_changed;
}
return changed;
}
}
} | #include "xla/service/spmd/spmd_prepare.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
class SpmdPrepareTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t distance_threshold = 100) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("spmd-prepare");
pipeline.AddPass<SpmdPrepare>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
};
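// SpmdPrepare should split the scatter over two-way concatenated, iota-based
// indices/updates into two chained scatters.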
TEST_F(SpmdPrepareTest, ScatterParallelIndexSplit) {
absl::string_view hlo_string = R"(
HloModule module
region_157.5067 {
Arg_0.5068 = f32[] parameter(0)
Arg_1.5069 = f32[] parameter(1)
ROOT add.5070 = f32[] add(Arg_0.5068, Arg_1.5069)
}
ENTRY entry {
p0 = f32[16,1000,2000]{2,1,0} parameter(0), sharding={devices=[4,2,1]<=[8]}
p1 = f32[16,1000,2000]{2,1,0} parameter(1), sharding={devices=[4,2,1]<=[8]}
p2 = s32[16,1000,64,1]{3,2,1,0} parameter(2), sharding={devices=[4,2,1,1]<=[8]}
p3 = f32[16,1000,64]{2,1,0} parameter(3), sharding={devices=[4,2,1]<=[8]}
p4 = f32[16,1000,64]{2,1,0} parameter(4), sharding={devices=[4,2,1]<=[8]}
iota.0 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.1 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
iota.2 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=0, sharding={devices=[4,2,1,1]<=[8]}
iota.3 = s32[16,1000,64,1]{3,2,1,0} iota(), iota_dimension=1, sharding={devices=[4,2,1,1]<=[8]}
concatenate.0 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.0, iota.1, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.1 = s32[16,1000,64,3]{3,2,1,0} concatenate(iota.2, iota.3, p2), dimensions={3}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.130 = s32[32,1000,64,3]{3,2,1,0} concatenate(concatenate.0, concatenate.1), dimensions={0}, sharding={devices=[4,2,1,1]<=[8]}
concatenate.131 = f32[32,1000,64]{2,1,0} concatenate(p3, p4), dimensions={0}, sharding={devices=[4,2,1]<=[8]}
add.190 = f32[16,1000,2000]{2,1,0} add(p0, p1), sharding={devices=[4,2,1]<=[8]}
ROOT scatter.2 = f32[16,1000,2000]{2,1,0} scatter(add.190, concatenate.130, concatenate.131), update_window_dims={}, inserted_window_dims={0,1,2}, scatter_dims_to_operand_dims={0,1,2}, index_vector_dim=3, to_apply=region_157.5067, sharding={devices=[4,2,1]<=[8]}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* root = module->entry_computation()->root_instruction();
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
root,
op::Scatter(
op::Scatter(op::Add(),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()),
op::Concatenate(op::Iota(), op::Iota(), op::Parameter()),
op::Parameter()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_prepare.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_prepare_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a699b20-fc79-4536-a38d-cf545441b0c0 | cpp | tensorflow/tensorflow | stateful_rng_spmd_partitioner | third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner.cc | third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner_test.cc | #include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/service/call_graph.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
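// rng-get-and-update-state is either pinned to a single device or kept
// replicated: every partition receives its own clone of the op.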
absl::Status StatefulRngSpmdPartitioningVisitor::HandleRngGetAndUpdateState(
HloInstruction* hlo) {
if (hlo->sharding().HasUniqueDevice()) {
return HandleSingleDevice(hlo);
}
TF_RET_CHECK(hlo->sharding().IsReplicated());
auto clone =
builder()->AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), {}));
clone->set_sharding(hlo->sharding());
SetPartitionedHlo(
hlo, spmd::PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
.Reshard(hlo->sharding()));
return absl::OkStatus();
}
std::unique_ptr<spmd::SpmdPartitioningVisitor>
StatefulRngSpmdPartitioner::CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, spmd::SpmdLogger* logger,
spmd::SpmdPartitionerOptions options, const CallGraph& call_graph) {
return std::make_unique<StatefulRngSpmdPartitioningVisitor>(
computation, num_partitions, num_replicas, collective_ops_creator,
next_channel_id, logger, std::move(options), this, call_graph);
}
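// Assigns a replicated sharding to any unannotated rng-get-and-update-state
// before the regular SPMD sharding preprocessing runs.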
absl::Status StatefulRngSpmdPartitioner::PreprocessSharding(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState &&
!hlo->has_sharding()) {
hlo->set_sharding(HloSharding::Replicate());
}
}
}
return spmd::SpmdPartitioner::PreprocessSharding(module, execution_threads);
}
bool StatefulRngSpmdPartitioner::CanSideEffectingHaveReplicatedSharding(
const HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState) return true;
return spmd::SpmdPartitioner::CanSideEffectingHaveReplicatedSharding(hlo);
}
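// When the unsafe pipelined-loop annotator flag is set, marks while-loop
// bodies with the "is_pipelined_while_loop" frontend attribute.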
absl::Status StatefulRngSpmdPartitioner::HandleRotateRightWhilePreprocessing(
HloComputation* computation) {
if (!computation->IsWhileBodyComputation()) {
return absl::OkStatus();
}
HloInstruction* while_loop = computation->WhileCallInstruction();
TF_RET_CHECK(while_loop);
if (computation->parent()
->config()
.debug_options()
.xla_gpu_unsafe_pipelined_loop_annotator()) {
xla::FrontendAttributes attributes;
(*attributes.mutable_map())["is_pipelined_while_loop"] = "true";
while_loop->add_frontend_attributes(attributes);
}
return absl::OkStatus();
}
}
} | #include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
namespace op = xla::testing::opcode_matchers;
int64_t CountInstructions(const HloComputation &computation, HloOpcode opcode) {
int64_t count = 0;
for (const auto &instruction : computation.instructions()) {
if (instruction->opcode() == opcode) {
count++;
}
}
return count;
}
class StatefulRngSpmdPartitionerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
absl::string_view hlo_module, int64_t num_partitions,
DebugOptions debug_options,
std::function<void(HloPassPipeline &pipeline)> add_passes = nullptr,
bool skip_checking_windowed_einsum_users = false,
bool disable_ag_rewrite_for_multiple_consumers = false) {
HloModuleConfig config = GetModuleConfigForTest(1, num_partitions);
config.set_use_spmd_partitioning(true);
config.set_debug_options(debug_options);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
HloPassPipeline pass("partitioning");
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
if (add_passes) {
add_passes(pass);
}
    pass.AddPass<ShardingPropagation>(/*is_spmd=*/true);
pass.AddPass<StatefulRngSpmdPartitioner>(
num_partitions,
        /*num_replicas=*/1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum(),
skip_checking_windowed_einsum_users,
disable_ag_rewrite_for_multiple_consumers);
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
void VerifyNoAllReduce(HloModule *module) {
for (HloComputation *computation : module->computations()) {
for (HloInstruction *hlo : computation->instructions()) {
EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce);
}
}
}
DebugOptions GetDefaultDebugOptions() {
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(1000000);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(false);
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
return debug_options;
}
};
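// Partitioning the expanded RNG with a replicated consumer must not introduce
// any all-reduce.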
TEST_F(StatefulRngSpmdPartitionerTest, RngReplicatedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={replicated}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
DebugOptions debug_options = GetDebugOptionsForTest();
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest, RngPartitionedConsumer) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[50,100] parameter(0), sharding={replicated}
%mu = f32[] constant(0)
%sigma = f32[] constant(1)
%rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform
ROOT %add = f32[50,100] add(%rng, %p0), sharding={devices=[2,1]0,1}
}
)";
auto add_passes = [](HloPassPipeline &pipeline) {
pipeline.AddPass<RngExpander>();
};
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 2,
GetDefaultDebugOptions(), add_passes));
XLA_VLOG_LINES(1, module->ToString());
VerifyNoAllReduce(module.get());
}
TEST_F(StatefulRngSpmdPartitionerTest,
EinsumDisableRewriteForAgWithMultipleConsumers) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(bf16[2,2048,24576]{2,1,0}, bf16[24576,98304]{1,0}, bf16[24576,98304]{1,0})->bf16[2,2048,98304]{2,1,0}}, num_partitions=4
ENTRY main {
Arg_0.1 = bf16[2,2048,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
Arg_1.2 = bf16[24576,98304]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
dot.5 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
Arg_2.3 = bf16[24576,98304]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
dot.6 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]}
ROOT add.8 = bf16[2,2048,98304]{2,1,0} add(dot.5, dot.6), sharding={devices=[1,1,4]<=[4]}
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(0);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options,
                           /*add_passes=*/nullptr,
                           /*skip_checking_windowed_einsum_users=*/true,
                           /*disable_ag_rewrite_for_multiple_consumers=*/true));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kWhile),
1);
EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kDot),
1);
EXPECT_EQ(
CountInstructions(*module->entry_computation(), HloOpcode::kAllGather),
1);
}
TEST_F(StatefulRngSpmdPartitionerTest, VerifyThresholdSetCorrectly) {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
int64_t threshold = 400;
debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(threshold);
debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true);
StatefulRngSpmdPartitioner rng_spmd_partitioner(
      /*num_partitions=*/2, /*num_replicas=*/1,
debug_options.xla_gpu_threshold_for_windowed_einsum_mib(),
debug_options.xla_gpu_multi_streamed_windowed_einsum());
EXPECT_EQ(rng_spmd_partitioner.options().threshold_for_windowed_einsum_mib,
threshold);
EXPECT_EQ(rng_spmd_partitioner.options().unroll_windowed_einsum, true);
}
TEST_F(StatefulRngSpmdPartitionerTest,
MergedSliceThenConcatRotateRightWhileOp) {
absl::string_view hlo_string = R"(
HloModule test
%Body {
%param = (f32[12], s32[]) parameter(0)
%i = s32[] get-tuple-element(%param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%param0 = f32[12] get-tuple-element(%param), index=0, sharding={devices=[4]<=[4]}
%slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]}
%slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]}
%concat = f32[12] concatenate(%slice0, %slice1), dimensions={0}, sharding={devices=[4]<=[4]}
ROOT %tuple = (f32[12], s32[]) tuple(%concat, %i_plus_one)
}
%Cond {
%param.1 = (f32[12], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element(%param.1), index=1
%trip_count = s32[] constant(11)
ROOT %done = pred[] compare(%i.1, %trip_count), direction=LT
}
ENTRY %test {
%i_start = f32[12] parameter(0)
%p_start = s32[] constant(0)
%initial_tuple = (f32[12], s32[]) tuple(%i_start, %p_start)
ROOT %while = (f32[12], s32[]) while(%initial_tuple), condition=%Cond, body=%Body
}
)";
DebugOptions debug_options = GetDefaultDebugOptions();
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4, debug_options));
const HloInstruction *whileOp =
module->entry_computation()->GetInstructionWithName("while.1");
const HloInstruction *root =
whileOp->while_body()->GetInstructionWithName("concatenate");
auto rotate =
op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
EXPECT_TRUE(
whileOp->frontend_attributes().map().contains("is_pipelined_while_loop"));
debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false);
TF_ASSERT_OK_AND_ASSIGN(
module,
PartitionComputation(hlo_string, 4, debug_options));
whileOp = module->entry_computation()->GetInstructionWithName("while.1");
root = whileOp->while_body()->GetInstructionWithName("concatenate");
rotate = op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice());
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
11861505-cf46-458e-b235-534742eff9ac | cpp | tensorflow/tensorflow | schedule_aware_collective_ops_cse | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse.cc | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse_test.cc | #include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
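// Returns true if the reshape/bitcast only inserts degenerate (size-1)
// dimensions, leaving every input dimension unmodified.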
bool IsAddingOnlyDegenerateDimensions(const HloInstruction* inst) {
if (inst->opcode() != HloOpcode::kBitcast &&
inst->opcode() != HloOpcode::kReshape) {
return false;
}
const Shape& in_shape = inst->operand(0)->shape();
const Shape& out_shape = inst->shape();
return ShapeUtil::ElementsIn(in_shape) == ShapeUtil::ElementsIn(out_shape) &&
ShapeUtil::DimensionsUnmodifiedByReshape(in_shape, out_shape).size() ==
in_shape.rank();
}
const HloInstruction* PassthroughDegenerateAddingReshapes(
const HloInstruction* inst) {
while (IsAddingOnlyDegenerateDimensions(inst)) {
inst = inst->operand(0);
}
return inst;
}
bool ShouldConsiderSchedule(HloInstruction* hlo) {
return hlo->opcode() != HloOpcode::kCollectivePermute;
}
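// Returns `hlo` if it is a CSE candidate: a collective-permute, an
// all-gather, or an all-reduce that implements an all-gather pattern
// (dynamic-update-slice of a broadcast); otherwise nullptr. `for_replicas`
// selects cross-replica (no channel id) vs. cross-partition collectives.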
HloInstruction* MayConsiderCollective(HloInstruction* hlo, bool for_replicas) {
auto chan_instr = DynCast<HloChannelInstruction>(hlo);
if (!chan_instr) {
return nullptr;
}
if (for_replicas == chan_instr->channel_id().has_value()) {
return nullptr;
}
if (hlo->opcode() == HloOpcode::kCollectivePermute) {
return hlo;
}
auto coll = DynCast<HloCollectiveInstruction>(hlo);
if (!coll) {
return nullptr;
}
if (coll->constrain_layout()) {
return nullptr;
}
if (coll->opcode() == HloOpcode::kAllGather) {
return coll;
}
if (coll->opcode() == HloOpcode::kAllReduce && coll->shape().IsArray()) {
auto operand = coll->operand(0);
return operand->opcode() == HloOpcode::kDynamicUpdateSlice &&
operand->operand(0)->opcode() == HloOpcode::kBroadcast
? coll
: nullptr;
}
return nullptr;
}
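// CSEs equivalent collectives in a post-order walk, but skips a reuse that
// would extend the earlier result's live range by more than
// `distance_threshold` levels of instruction height.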
absl::StatusOr<bool> RunOnComputation(HloComputation* comp, bool for_replicas,
int64_t distance_threshold) {
bool changed = false;
absl::flat_hash_map<const HloInstruction*, int64_t> height;
auto ordered_hlos = comp->MakeInstructionPostOrder();
int64_t max_height = 0;
for (auto it = ordered_hlos.rbegin(); it != ordered_hlos.rend(); ++it) {
auto hlo = *it;
int64_t h = 0;
for (auto user : hlo->users()) {
h = std::max(h, height[user]) + 1;
}
max_height = std::max(max_height, h);
height[hlo] = h;
}
auto lowest_user_height = [&](const HloInstruction* hlo) {
int64_t lowest = height[hlo];
for (auto user : hlo->users()) {
lowest = std::min(lowest, height[user]);
}
return lowest;
};
absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
operand_to_collective;
for (HloInstruction* hlo : ordered_hlos) {
HloInstruction* coll = MayConsiderCollective(hlo, for_replicas);
if (!coll) {
continue;
}
auto& earlier_colls =
operand_to_collective[PassthroughDegenerateAddingReshapes(
coll->operand(0))];
bool found = false;
int64_t coll_height = height[coll];
for (HloInstruction* earlier_coll : earlier_colls) {
if (!ShapeUtil::Equal(earlier_coll->shape(), coll->shape())) {
continue;
}
HloInstruction* coll_operand = coll->mutable_operand(0);
TF_RETURN_IF_ERROR(
coll->ReplaceOperandWith(0, earlier_coll->mutable_operand(0)));
if (!earlier_coll->IdenticalIgnoringChannelIdValues(*coll)) {
TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
continue;
}
found = true;
if (ShouldConsiderSchedule(coll) &&
lowest_user_height(earlier_coll) > coll_height + distance_threshold) {
TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
earlier_coll = coll;
continue;
}
changed = true;
VLOG(1) << "Replacing " << coll->ToString() << " with "
<< earlier_coll->ToString();
TF_RETURN_IF_ERROR(coll->ReplaceAllUsesWith(earlier_coll));
break;
}
if (!found) {
earlier_colls.push_back(coll);
}
}
return changed;
}
}
absl::StatusOr<bool> ScheduleAwareCollectiveOpsCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(
auto comp_changed,
RunOnComputation(comp, for_replicas_, distance_threshold_));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
class CollectiveOpsCseTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t distance_threshold = 100) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<ScheduleAwareCollectiveOpsCSE>(distance_threshold,
                                                    /*for_replicas=*/false);
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
};
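// Two identical all-gathers of the same operand should be merged into one.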
TEST_F(CollectiveOpsCseTest, SimpleCseAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[2,8]{1,0} parameter(0)
cp1 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
channel_id=0
cp2 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
channel_id=1
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(cp1, cp2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughAllGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
ag1 = s32[2,8]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
cp1 = s32[1,8]{1,0} collective-permute(rshp), source_target_pairs={{0,1},{1,0}},
channel_id=0
cp2 = s32[1,8]{1,0} collective-permute(rshp2), source_target_pairs={{0,1},{1,0}},
channel_id=1
ROOT tuple = (s32[1,8]{1,0}, s32[1,8]{1,0}) tuple(cp1, cp2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleNoCseInvalidReshapes) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[2,4]{1,0} reshape(param0)
rshp2 = s32[2,4]{1,0} reshape(param0)
ag1 = s32[4,4]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[4,4]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[4,4]{1,0}, s32[4,4]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
channel_id=0, use_global_device_ids=true
ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
dimensions={1}, channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDimReshapeLookthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
rshp = s32[1,8]{1,0} reshape(param0)
rshp2 = s32[1,8]{1,0} reshape(param0)
ag1 = s32[1,16]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={1},
channel_id=0, use_global_device_ids=true
ag2 = s32[1,16]{1,0} all-gather(rshp2), replica_groups={{0,1}},
dimensions={1}, channel_id=1, use_global_device_ids=true
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, NoCseGlobalDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
channel_id=0, use_global_device_ids=true
ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0},{1}}, dimensions={0},
channel_id=1, use_global_device_ids=false
ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
TEST_F(CollectiveOpsCseTest, NoCseChannelIdMismatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[1,8]{1,0} parameter(0)
ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
channel_id=0
ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
dimensions={1}
ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
HloInstruction* tuple = module->entry_computation()->root_instruction();
EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(tuple->operand_count(), 2);
EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
427bb8a3-c26e-4c32-83fa-f82be047f902 | cpp | tensorflow/tensorflow | spmd_partitioner | third_party/xla/xla/service/spmd/spmd_partitioner.cc | third_party/xla/xla/service/spmd/spmd_partitioner_test.cc | #include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_layout.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/service/spmd/custom_call_handler.h"
#include "xla/service/spmd/spmd_partitioner_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using hlo_sharding_util::GroupedSharding;
}
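// Emits the recorded per-instruction peak temporary sizes, largest first, up
// to report_instruction_count_ entries.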
std::string SpmdLogger::MakeReport() {
std::string report;
absl::StrAppend(&report,
"\n\n***** SPMD memory during transformation *****\n");
std::sort(entries_.begin(), entries_.end(),
[](auto const& entry0, auto const& entry1) {
return entry0.first > entry1.first;
});
for (int64_t i = 0;
i < std::min<int64_t>(report_instruction_count_, entries_.size()); ++i) {
absl::StrAppend(&report, "\n ",
tsl::strings::HumanReadableNumBytes(entries_[i].first),
" : ", entries_[i].second, "\n");
}
return report;
}
void SpmdLogger::RegisterLogEntry(HloInstruction* hlo,
const std::vector<HloInstruction*>& group) {
if (disabled_) {
return;
}
std::string report = hlo->ToString();
int64_t max_value = -1;
for (HloInstruction* inst : group) {
if (!inst->shape().IsArray()) {
continue;
}
max_value = std::max<int64_t>(max_value, ShapeSizeInBytes(inst->shape()));
absl::StrAppend(&report, " * ", inst->ToString(), "\n");
}
entries_.push_back(std::make_pair(max_value, report));
}
std::string SpmdLogger::ReportBeforePartition(
const HloModule& module, int64_t report_instruction_count) {
std::string report;
absl::StrAppend(&report,
"\n\n***** SPMD memory usage before partition *****\n");
absl::StrAppend(&report, "\n ** Replicated instructions\n");
absl::StrAppend(&report, ReportMemoryUsage(
module,
[](const HloInstruction* hlo) {
return !hlo->has_sharding() ||
hlo->sharding().IsReplicated();
},
report_instruction_count));
absl::StrAppend(&report, "\n ** All instructions\n");
absl::StrAppend(&report, ReportMemoryUsage(module, HloPredicateTrue,
report_instruction_count));
return report;
}
std::string SpmdLogger::ReportAfterPartition(
const HloModule& module, int64_t report_instruction_count) {
std::string report;
absl::StrAppend(&report,
"\n\n***** SPMD memory usage after partition *****\n");
absl::StrAppend(&report, ReportMemoryUsage(module, HloPredicateTrue,
report_instruction_count));
return report;
}
template <typename F>
std::string SpmdLogger::ReportMemoryUsage(
const HloModule& module, const F& filter,
int64_t report_instruction_count) {
std::string report;
std::vector<HloInstruction*> instructions;
instructions.reserve(module.instruction_count());
for (auto computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (auto hlo : computation->instructions()) {
if (!hlo->shape().IsArray() ||
ShapeUtil::IsEffectiveScalar(hlo->shape())) {
continue;
}
if (filter(hlo)) {
instructions.push_back(hlo);
}
}
}
const auto add_report = [&](std::vector<HloInstruction*>* insts) {
std::sort(insts->begin(), insts->end(),
[](const HloInstruction* inst0, const HloInstruction* inst1) {
return ShapeSizeInBytes(inst0->shape()) >
ShapeSizeInBytes(inst1->shape());
});
for (int64_t i = 0;
i < std::min<int64_t>(report_instruction_count, insts->size()); ++i) {
absl::StrAppend(&report, " ",
tsl::strings::HumanReadableNumBytes(
ShapeSizeInBytes((*insts)[i]->shape())),
" : ", (*insts)[i]->ToString(), "\n");
}
};
add_report(&instructions);
return report;
}
namespace {
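// Shardings on infeed/outfeed, send/recv and entry parameters must survive
// partitioning; all other sharding annotations are cleared afterwards.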
bool ShouldKeepSharding(const HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kInfeed ||
hlo->opcode() == HloOpcode::kOutfeed ||
DynCast<HloSendRecvInstruction>(hlo) != nullptr) {
return true;
}
if (hlo->opcode() == HloOpcode::kParameter &&
hlo->parent() == hlo->GetModule()->entry_computation()) {
return true;
}
return false;
}
absl::Status ClearShardingAttributes(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->instructions()) {
if (ShouldKeepSharding(hlo)) {
continue;
}
hlo->clear_sharding();
}
}
return absl::OkStatus();
}
HloSharding GetShardingReplicatedOnWindowedDimension(
const HloSharding& sharding, const Window& window) {
std::vector<int64_t> dimensions_to_replicate;
for (int i = 0; i < window.dimensions_size(); ++i) {
const WindowDimension& wd = window.dimensions(i);
if (window_util::IsTrivialWindowDimension(wd)) {
continue;
}
dimensions_to_replicate.push_back(i);
}
return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
sharding, dimensions_to_replicate);
}
}
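// Adds an instruction and records, per HLO, which output dimensions are known
// to hold identical values because they originate from a broadcast; this
// information is propagated through elementwise ops, transpose, reshape,
// slice, dynamic-slice and pad.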
HloInstruction* SpmdBuilder::AddInstruction(
std::unique_ptr<HloInstruction> instruction) {
HloInstruction* hlo =
HloComputation::Builder::AddInstruction(std::move(instruction));
if (visiting_hlo_) {
hlo->set_metadata(visiting_hlo_->metadata());
instructions_[visiting_hlo_].push_back(hlo);
}
if (hlo->opcode() == HloOpcode::kBroadcast) {
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (!absl::c_linear_search(hlo->dimensions(), i)) {
broadcast_dims_[hlo].insert(i);
}
}
}
if (hlo->IsElementwise() && hlo->operand_count() > 0 &&
hlo->shape().IsArray()) {
absl::flat_hash_set<int64_t> broadcast_dims;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
broadcast_dims.insert(i);
}
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
auto it = broadcast_dims_.find(hlo->operand(i));
if (it == broadcast_dims_.end()) {
broadcast_dims.clear();
break;
}
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (!it->second.contains(i)) {
broadcast_dims.erase(i);
}
}
}
if (!broadcast_dims.empty()) {
broadcast_dims_[hlo] = std::move(broadcast_dims);
}
}
if (hlo->opcode() == HloOpcode::kTranspose) {
auto it = broadcast_dims_.find(hlo->operand(0));
if (it != broadcast_dims_.end()) {
absl::flat_hash_set<int64_t> xpose_broadcast_dims;
std::vector<int64_t> reverse_map(hlo->shape().rank());
for (int64_t i = 0; i < reverse_map.size(); ++i) {
reverse_map[hlo->dimensions(i)] = i;
}
for (int64_t dim : it->second) {
xpose_broadcast_dims.insert(reverse_map[dim]);
}
broadcast_dims_[hlo] = std::move(xpose_broadcast_dims);
}
}
if (hlo->opcode() == HloOpcode::kReshape &&
Product(hlo->shape().dimensions()) > 0) {
auto it = broadcast_dims_.find(hlo->operand(0));
if (it != broadcast_dims_.end()) {
absl::flat_hash_set<int64_t> reshape_broadcast_dims;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
reshape_broadcast_dims.insert(i);
}
std::vector<int64_t> before_dim_size_stack;
std::vector<int64_t> after_dim_size_stack;
const int64_t operand0_rank = hlo->operand(0)->shape().rank();
const int64_t hlo_shape_rank = hlo->shape().rank();
before_dim_size_stack.reserve(operand0_rank);
after_dim_size_stack.reserve(hlo_shape_rank);
for (int64_t i = operand0_rank - 1; i >= 0; --i) {
before_dim_size_stack.push_back(hlo->operand(0)->shape().dimensions(i));
}
for (int64_t i = hlo_shape_rank - 1; i >= 0; --i) {
after_dim_size_stack.push_back(hlo->shape().dimensions(i));
}
while (!before_dim_size_stack.empty() && !after_dim_size_stack.empty()) {
int64_t before_size = before_dim_size_stack.back();
int64_t after_size = after_dim_size_stack.back();
int64_t current_before_dim =
hlo->operand(0)->shape().rank() - before_dim_size_stack.size();
int64_t current_after_dim =
hlo->shape().rank() - after_dim_size_stack.size();
before_dim_size_stack.pop_back();
after_dim_size_stack.pop_back();
if (!it->second.contains(current_before_dim)) {
reshape_broadcast_dims.erase(current_after_dim);
}
if (before_size == after_size) {
continue;
}
if (before_size % after_size == 0) {
before_dim_size_stack.push_back(before_size / after_size);
} else if (after_size % before_size == 0) {
after_dim_size_stack.push_back(after_size / before_size);
} else {
for (int64_t i = current_after_dim; i < hlo->shape().rank(); ++i) {
reshape_broadcast_dims.erase(i);
}
break;
}
}
if (!before_dim_size_stack.empty() || !after_dim_size_stack.empty()) {
reshape_broadcast_dims.clear();
}
if (!reshape_broadcast_dims.empty()) {
broadcast_dims_[hlo] = std::move(reshape_broadcast_dims);
}
}
}
if (hlo->opcode() == HloOpcode::kSlice ||
hlo->opcode() == HloOpcode::kDynamicSlice) {
auto it = broadcast_dims_.find(hlo->operand(0));
if (it != broadcast_dims_.end()) {
auto dims = it->second;
broadcast_dims_[hlo] = std::move(dims);
}
}
if (hlo->opcode() == HloOpcode::kPad) {
auto it = broadcast_dims_.find(hlo->operand(0));
if (it != broadcast_dims_.end()) {
absl::flat_hash_set<int64_t> pad_broadcast_dims;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
const auto& dim = hlo->padding_config().dimensions(i);
if (dim.edge_padding_low() == 0 && dim.edge_padding_high() == 0 &&
dim.interior_padding() == 0 && it->second.contains(i)) {
pad_broadcast_dims.insert(i);
}
}
if (!pad_broadcast_dims.empty()) {
broadcast_dims_[hlo] = std::move(pad_broadcast_dims);
}
}
}
return hlo;
}
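// Reshards to `target`, consulting and updating the per-HLO reshard cache;
// reshards that replicate data are only cached when the cache_all_gather
// option is enabled.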
PartitionedHlo PartitionedHlo::Reshard(const HloSharding& target,
std::optional<Literal> pad_value) const {
if (sharding() == target) {
return *this;
}
if (hlo()->opcode() == HloOpcode::kConstant && !sharding().IsManual() &&
target.IsManual()) {
PartitionedHlo pconstant = this->Reshard(HloSharding::Replicate());
pconstant.hlo()->set_sharding(target);
return pconstant;
}
auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
const bool replace_cache = pad_value.has_value();
const bool is_to_replicate =
hlo_->shape().IsArray() && target.NumTiles() < sharding().NumTiles();
const bool use_cache =
!is_to_replicate || state_.partitioner->options().cache_all_gather;
if (!replace_cache && use_cache) {
auto it = cache.find(target);
if (it != cache.end()) {
return it->second;
}
}
auto resharded = ReshardNoCache(target, std::move(pad_value));
{
auto& cache =
state_.reshard_cache->per_hlo_cache[resharded.hlo()].reshard_cache;
cache.insert_or_assign(sharding(), *this);
}
if (use_cache) {
auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
auto [it, _] = cache.insert_or_assign(target, std::move(resharded));
return it->second;
}
return resharded;
}
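// Uncached resharding: tries collective-permute, all-to-all and
// partial-replication paths (plus grouped resharding on dimensions with equal
// tiling) before falling back to full replication followed by a
// dynamic-slice.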
PartitionedHlo PartitionedHlo::ReshardNoCache(
const HloSharding& target, std::optional<Literal> pad_value,
bool allow_full_replication) const {
VLOG(2) << "Resharding " << hlo_->ToString() << " from "
<< hlo_->sharding().ToString() << " to " << target.ToString();
const Shape& shape = hlo_->shape();
if (shape.element_type() == TOKEN) {
return *this;
}
CHECK(shape.IsTuple() || !target.IsTuple());
if (shape.IsTuple() && !target.IsTuple()) {
return Reshard(target.GetTupleSharding(shape).value());
}
if (shape.IsTuple()) {
std::vector<HloInstruction*> elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
auto subshape = ShapeUtil::GetTupleElementShape(shape, i);
auto element = state_.b->AddInstruction(
HloInstruction::CreateGetTupleElement(subshape, hlo(), i));
element->set_sharding(sharding().GetSubSharding(shape, {i}));
elements.push_back(
PartitionedHlo(
element, ShapeUtil::GetTupleElementShape(base_shape_, i), state_)
.Reshard(target.GetSubSharding(shape, {i}))
.hlo());
}
auto tuple =
state_.b->AddInstruction(HloInstruction::CreateTuple(elements));
tuple->set_sharding(target);
return PartitionedHlo(tuple, base_shape_, state_);
}
if (sharding() == target) {
return *this;
}
CHECK_EQ(target.IsManualSubgroup(), sharding().IsManualSubgroup());
if (sharding().IsManualSubgroup()) {
auto grouped = hlo_sharding_util::GetManualSubgroupSharding(sharding());
auto target_grouped = AlignGroupsWithIfCompatible(
hlo_sharding_util::GetManualSubgroupSharding(target), grouped);
CHECK(target_grouped.has_value())
<< "Resharding target has incompatible sharding subgroups. From "
<< sharding().ToString() << " to " << target.ToString();
HloSharding original_sharding = sharding();
hlo_->set_sharding(grouped.sharding);
HloInstruction* partitioned =
PartitionedHlo(hlo_, base_shape_,
CreatePerGroupPartitioningState(
state(), grouped.device_groups, state_.b))
.ReshardNoCache(target_grouped->sharding)
.hlo();
hlo_->set_sharding(original_sharding);
partitioned->set_sharding(target);
return PartitionedHlo(partitioned, base_shape_, state_);
}
if (CanReshardWithCollectivePermute(sharding(), target)) {
return ReshardWithCollectivePermute(target);
}
if (auto src_tgt_dims =
GetReshardAllToAllSourceTargetDims(sharding(), target)) {
return ReshardWithAllToAll(target, *src_tgt_dims);
}
if (!target.IsTileMaximal() && sharding().ReplicateOnLastTileDim()) {
auto try_reshard = ReshardFromPartialReplicateWithDynamicSlice(target);
if (try_reshard.has_value()) {
return try_reshard.value();
}
try_reshard = ReshardPartialReplicateWithAllToAll(target);
if (try_reshard.has_value()) {
return try_reshard.value();
}
}
if (!sharding().IsTileMaximal() && target.ReplicateOnLastTileDim()) {
auto try_reshard = ReshardToPartialReplicateWithAllGather(target);
if (try_reshard.has_value()) {
return try_reshard.value();
}
try_reshard = ReshardPartialReplicateWithAllToAll(target);
if (try_reshard.has_value()) {
return try_reshard.value();
}
}
if (!sharding().IsReplicated()) {
if (!target.IsReplicated()) {
if (sharding().IsTiled() && target.IsTiled()) {
auto reshard = TryComplexReshardHandling(target);
if (reshard.has_value()) {
return reshard.value();
}
std::vector<int64_t> equal_dims;
for (int64_t dim = 0; dim < hlo_->shape().rank(); ++dim) {
if (sharding().tile_assignment().dim(dim) == 1 ||
target.tile_assignment().dim(dim) !=
sharding().tile_assignment().dim(dim)) {
continue;
}
equal_dims.push_back(dim);
}
if (!equal_dims.empty()) {
auto grouped =
hlo_sharding_util::GroupShardingOnDims(sharding(), equal_dims);
auto grouped_target = AlignGroupsWith(
hlo_sharding_util::GroupShardingOnDims(target, equal_dims),
grouped);
Shape inner_base_shape = base_shape_;
for (int64_t dim : equal_dims) {
inner_base_shape.set_dimensions(dim, hlo_->shape().dimensions(dim));
}
auto state = CreatePerGroupPartitioningState(
state_, grouped.device_groups, state_.b);
HloInstruction* copy =
state_.b->AddInstruction(HloInstruction::CreateUnary(
hlo_->shape(), HloOpcode::kCopy, hlo_));
copy->set_sharding(grouped.sharding);
HloInstruction* resharded =
PartitionedHlo(copy, inner_base_shape, state)
.ReshardNoCache(grouped_target.sharding)
.hlo();
resharded->set_sharding(
hlo_sharding_util::UngroupSharding(grouped_target));
return PartitionedHlo(resharded, base_shape_, state_)
.ReshardNoCache(target);
}
}
if (!allow_full_replication) {
return *this;
}
LOG(ERROR)
<< "[spmd] Involuntary full rematerialization. The compiler was "
"not able to go from sharding "
<< sharding().ToString(true) << " to "
<< target.ToString(true)
<< " without doing a full rematerialization of the tensor for HLO "
"operation: "
<< hlo_->ToString()
<< ". You probably want to enrich the sharding annotations to "
"prevent "
"this from happening.";
}
return Replicate().Reshard(target);
}
if (target.IsTileMaximal()) {
auto copy = state_.b->AddInstruction(
HloInstruction::CreateUnary(hlo_->shape(), HloOpcode::kCopy, hlo_));
copy->set_sharding(target);
return PartitionedHlo(copy, base_shape_, state_);
}
if (target.ReplicateOnLastTileDim()) {
std::vector<int64_t> group_dims(target.tile_assignment().num_dimensions() -
1);
std::iota(group_dims.begin(), group_dims.end(), 0);
auto target_grouped =
hlo_sharding_util::GroupShardingOnDims(target, group_dims);
auto partially_sharded = PerGroupSliceFromReplicated(
hlo_, state_.partition_id, target_grouped.device_groups, group_dims,
target_grouped.group_dim_sizes, state_.b);
partially_sharded->set_sharding(target);
return PartitionedHlo(partially_sharded, base_shape(), state_);
}
auto padded_hlo = PadBaseShapeBeforeUnevenTiledSharding(
hlo_, target, state_.b, std::move(pad_value));
auto shard_shape = MakePartitionedShape(shape, target);
auto slice = state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, padded_hlo,
MakePartitionOffsets(shape, target, state_.partition_id, state_.b),
shard_shape.dimensions()));
slice->set_sharding(target);
return PartitionedHlo(slice, base_shape_, state_);
}
PartitionedHlo PartitionedHlo::PadWithValue(
HloInstruction* pad_value, absl::Span<const int64_t> left_padded_dims,
absl::Span<const int64_t> skipped_dims) const {
HloInstruction* result =
PadWithValueHlo(pad_value, left_padded_dims, skipped_dims);
if (hlo_ != result) {
result->set_sharding(hlo_->sharding());
}
return PartitionedHlo(result, base_shape_, state_);
}
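// Builds iota-based masks for unevenly tiled dimensions and selects
// `pad_value` for elements that fall outside the base shape.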
HloInstruction* PartitionedHlo::PadWithValueHlo(
HloInstruction* pad_value, absl::Span<const int64_t> left_padded_dims,
absl::Span<const int64_t> skipped_dims) const {
const HloSharding& sharding = hlo_->sharding();
const Shape& shape = hlo_->shape();
CHECK(!shape.IsTuple() && shape.element_type() != TOKEN);
if (sharding.IsReplicated() || EvenlyPartitions(base_shape_, sharding)) {
return hlo_;
}
CHECK(!sharding.IsTileMaximal());
auto index_shape = ShapeUtil::ChangeElementType(shape, S32);
auto mask_shape = ShapeUtil::ChangeElementType(index_shape, PRED);
auto get_mask_for_dim = [&](int64_t dim, HloInstruction* start_index) {
auto iota =
state_.b->AddInstruction(HloInstruction::CreateIota(index_shape, dim));
auto broadcast_start_index = state_.b->AddInstruction(
HloInstruction::CreateBroadcast(index_shape, start_index, {}));
auto index_in_full_shape =
state_.b->AddInstruction(HloInstruction::CreateBinary(
index_shape, HloOpcode::kAdd, iota, broadcast_start_index));
ComparisonDirection direction = ComparisonDirection::kLt;
int64_t index_limit = base_shape_.dimensions(dim);
if (absl::c_linear_search(left_padded_dims, dim)) {
direction = ComparisonDirection::kGe;
index_limit =
index_shape.dimensions(dim) * sharding.tile_assignment().dim(dim) -
index_limit;
}
auto limit = state_.b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(index_limit)));
auto broadcast_limit = state_.b->AddInstruction(
HloInstruction::CreateBroadcast(index_shape, limit, {}));
return state_.b->AddInstruction(HloInstruction::CreateCompare(
mask_shape, index_in_full_shape, broadcast_limit, direction));
};
HloInstruction* mask = nullptr;
auto offsets = MakePartitionOffsets(base_shape_, sharding,
state_.partition_id, state_.b);
for (int64_t i = 0; i < shape.rank(); ++i) {
if (base_shape_.dimensions(i) % sharding.tile_assignment().dim(i) == 0 ||
absl::c_linear_search(skipped_dims, i)) {
continue;
}
if (mask == nullptr) {
mask = get_mask_for_dim(i, offsets[i]);
} else {
mask = state_.b->AddInstruction(
HloInstruction::CreateBinary(mask->shape(), HloOpcode::kAnd, mask,
get_mask_for_dim(i, offsets[i])));
}
}
if (mask == nullptr) {
return hlo_;
}
auto broadcast_pad_value = state_.b->AddInstruction(
HloInstruction::CreateBroadcast(shape, pad_value, {}));
return state_.b->AddInstruction(HloInstruction::CreateTernary(
shape, HloOpcode::kSelect, mask, hlo_, broadcast_pad_value));
}
PartitionedHlo PartitionedHlo::PadWithZero(
absl::Span<const int64_t> left_padded_dims,
absl::Span<const int64_t> skipped_dims) const {
auto zero = state_.b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(hlo_->shape().element_type())));
return PadWithValue(zero, left_padded_dims, skipped_dims);
}
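// Reshards the input of a windowed op so each partition holds the region its
// output shard needs, computing per-shard window counts, explicit padding and
// halo-exchange parameters; results are cached per (sharding, window) pair.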
std::optional<PartitionedHlo::WindowedInputShardReturnValue>
PartitionedHlo::ReshardAsWindowedInput(const Window& window,
const HloSharding& target,
HloInstruction* pad_value,
bool mask_invalid_region,
bool force_mask_in_compact) {
auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].window_reshard_cache;
for (auto& entry : cache) {
if (std::get<0>(entry) == target &&
protobuf_util::ProtobufEquals(std::get<1>(entry), window)) {
return std::get<2>(entry);
}
}
auto update_cache = [&](WindowedInputShardReturnValue result) {
cache.emplace_back(target, window, std::move(result));
return std::get<2>(cache.back());
};
VLOG(2) << "ReshardAsWindowedInput()\n"
<< "\twindow:" << window_util::ToString(window)
<< "\ttarget sharding:" << target.ToString();
CHECK(!target.IsTileMaximal());
auto partition_ordinals =
MakeTiledPartitionOrdinals(target, state_.partition_id, state_.b);
auto shard_shape = base_shape_;
std::vector<MultiplyAddDivideOffsetCalculation> start_on_padded_calculations(
base_shape_.rank());
std::vector<MultiplyAddDivideOffsetCalculation> limit_on_padded_calculations(
base_shape_.rank());
std::vector<HloInstruction*> dynamic_slice_offset_on_output(
base_shape_.rank(), nullptr);
Window shard_window = window;
Shape padded_shape = base_shape_;
std::vector<HloInstruction*> offsets_on_padded_shape(base_shape_.rank());
std::vector<int64_t> per_shard_window_counts(base_shape_.rank());
std::vector<int64_t> explicit_left_padding(base_shape_.rank(), 0);
std::vector<int64_t> trimmed_target_sharding_tile_shape(base_shape_.rank());
std::vector<std::pair<int64_t, int64_t>> trimmed_target_sharding_middle_range(
base_shape_.rank(), std::pair<int64_t, int64_t>(-1, -1));
bool trimmed_shards = false;
std::vector<int64_t> dims_needs_pre_masking;
Shape halo_exchange_base_shape = base_shape_;
bool trimmed_in_shard = false;
std::vector<int64_t> pre_halo_exchange_slice_starts(base_shape_.rank(), 0);
std::vector<int64_t> pre_halo_exchange_slice_limits(
hlo_->shape().dimensions().begin(), hlo_->shape().dimensions().end());
std::vector<bool> can_leave_dimension_partitioned(base_shape_.rank(), false);
for (int64_t i = 0; i < base_shape_.rank(); ++i) {
can_leave_dimension_partitioned[i] =
window_util::IsTrivialWindowDimension(window.dimensions(i));
}
for (int64_t i = 0; i < base_shape_.rank(); ++i) {
int64_t shard_count = target.tile_assignment().dim(i);
trimmed_target_sharding_tile_shape[i] = shard_count;
if (shard_count == 1) {
offsets_on_padded_shape[i] = state_.b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
shard_shape.set_dimensions(
i, CeilOfRatio(base_shape_.dimensions(i), shard_count));
continue;
}
if (can_leave_dimension_partitioned[i]) {
int64_t shard_size = CeilOfRatio(base_shape_.dimensions(i), shard_count);
padded_shape.set_dimensions(i, shard_size * shard_count);
offsets_on_padded_shape[i] =
state_.b->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
partition_ordinals[i],
state_.b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(shard_size)))));
shard_shape.set_dimensions(i, shard_size);
continue;
}
const WindowDimension& wd = window.dimensions(i);
WindowDimension* swd = shard_window.mutable_dimensions(i);
const int64_t dilated_size = 1 + (wd.size() - 1) * wd.window_dilation();
const int64_t full_size =
1 + (base_shape_.dimensions(i) - 1) * wd.base_dilation() +
wd.padding_high() + wd.padding_low();
int64_t window_count = (full_size - dilated_size) / wd.stride() + 1;
per_shard_window_counts[i] = CeilOfRatio(window_count, shard_count);
int64_t input_shard_size = hlo_->shape().dimensions(i);
if (window_count < shard_count && wd.window_dilation() == 1 &&
wd.base_dilation() == 1) {
int64_t useful_input_shards = CeilOfRatio(
base_shape_.dimensions(i) + wd.padding_high(), input_shard_size);
if (useful_input_shards < shard_count) {
shard_count = std::max<int64_t>(useful_input_shards, window_count);
trimmed_shards = true;
trimmed_target_sharding_tile_shape[i] = shard_count;
if (shard_count == 1) {
offsets_on_padded_shape[i] = state_.b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
swd->set_padding_high(base_shape_.dimensions(i) + wd.padding_high() -
hlo_->shape().dimensions(i));
continue;
}
halo_exchange_base_shape.set_dimensions(i,
input_shard_size * shard_count);
if (input_shard_size * shard_count > base_shape_.dimensions(i) &&
wd.padding_high() > 0) {
dims_needs_pre_masking.push_back(i);
} else if (wd.padding_high() < 0 &&
full_size - wd.padding_low() < input_shard_size) {
input_shard_size = full_size - wd.padding_low();
halo_exchange_base_shape.set_dimensions(
i, input_shard_size * shard_count);
pre_halo_exchange_slice_limits[i] = input_shard_size;
trimmed_in_shard = true;
}
}
}
explicit_left_padding[i] = wd.padding_low() / wd.base_dilation();
swd->set_padding_low(wd.padding_low() % wd.base_dilation());
swd->set_padding_high(0);
if (window_count < shard_count && wd.window_dilation() == 1 &&
wd.base_dilation() == 1) {
int64_t middle_empty_shards =
(-explicit_left_padding[i]) / input_shard_size - window_count;
if (middle_empty_shards > 0) {
shard_count -= middle_empty_shards;
CHECK_GT(shard_count, 1);
trimmed_target_sharding_middle_range[i].first = window_count;
trimmed_target_sharding_middle_range[i].second = middle_empty_shards;
trimmed_shards = true;
trimmed_target_sharding_tile_shape[i] = shard_count;
explicit_left_padding[i] += middle_empty_shards * input_shard_size;
halo_exchange_base_shape.set_dimensions(i,
input_shard_size * shard_count);
HloInstruction* ordinal = partition_ordinals[i];
HloInstruction* left_count = CreateR0WithType<int32_t>(
ordinal->shape().element_type(), window_count, state_.b);
HloInstruction* on_left =
state_.b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(ordinal->shape(), PRED), ordinal,
left_count, ComparisonDirection::kLt));
HloInstruction* right_ordinal =
state_.b->AddInstruction(HloInstruction::CreateBinary(
ordinal->shape(), HloOpcode::kSubtract, ordinal, left_count));
partition_ordinals[i] =
state_.b->AddInstruction(HloInstruction::CreateTernary(
partition_ordinals[i]->shape(), HloOpcode::kSelect, on_left,
partition_ordinals[i], right_ordinal));
if (-explicit_left_padding[i] > input_shard_size * (shard_count - 1)) {
int64_t skip_amount =
-explicit_left_padding[i] - input_shard_size * (shard_count - 1);
input_shard_size -= skip_amount;
explicit_left_padding[i] += skip_amount * shard_count;
pre_halo_exchange_slice_starts[i] = skip_amount;
trimmed_in_shard = true;
if (full_size < input_shard_size) {
skip_amount = input_shard_size - full_size;
pre_halo_exchange_slice_limits[i] -= skip_amount;
explicit_left_padding[i] += skip_amount * (shard_count - 1);
input_shard_size = full_size;
}
halo_exchange_base_shape.set_dimensions(
i, input_shard_size * shard_count);
}
}
}
if (full_size < dilated_size) {
VLOG(2) << "Failed to reshard window operand because the window size is "
"larger than padded base size";
return std::nullopt;
}
if (wd.stride() != 1 &&
(wd.stride() * per_shard_window_counts[i]) % wd.base_dilation() != 0) {
VLOG(2) << "Failed to reshard window operand due to non-trivial dilation";
return std::nullopt;
}
start_on_padded_calculations[i] = MultiplyAddDivideOffsetCalculation(
wd.stride() * per_shard_window_counts[i],
wd.base_dilation() - 1 - swd->padding_low(), wd.base_dilation());
int64_t dilated_shard_size =
wd.stride() * (per_shard_window_counts[i] - 1) + dilated_size;
limit_on_padded_calculations[i] = MultiplyAddDivideOffsetCalculation(
wd.stride() * per_shard_window_counts[i],
dilated_shard_size + wd.base_dilation() - 1 - swd->padding_low(),
wd.base_dilation());
offsets_on_padded_shape[i] = start_on_padded_calculations[i].Calculate(
partition_ordinals[i], state_.b);
auto shard_size_function =
limit_on_padded_calculations[i] - start_on_padded_calculations[i];
int64_t max_shard_size = shard_size_function.MaxInRange(0, shard_count);
shard_shape.set_dimensions(i, max_shard_size);
padded_shape.set_dimensions(
i, limit_on_padded_calculations[i].Calculate(shard_count - 1));
if (wd.base_dilation() != 1) {
auto get_first_valid_element_offset_on_dilated_shard =
[&](int64_t shard_ordinal) {
return start_on_padded_calculations[i].Calculate(shard_ordinal) *
wd.base_dilation() +
swd->padding_low() -
wd.stride() * per_shard_window_counts[i] * shard_ordinal;
};
CHECK_EQ(get_first_valid_element_offset_on_dilated_shard(0),
swd->padding_low());
for (int64_t shard_ordinal = 0; shard_ordinal < shard_count;
++shard_ordinal) {
int64_t wanted_limit_on_dilated_shard =
wd.stride() * (per_shard_window_counts[i] - 1) + dilated_size;
int64_t actual_limit_on_dilated_shard_without_pad_high =
get_first_valid_element_offset_on_dilated_shard(shard_ordinal) +
(max_shard_size - 1) * wd.base_dilation() + 1;
swd->set_padding_high(std::max<int64_t>(
swd->padding_high(),
wanted_limit_on_dilated_shard -
actual_limit_on_dilated_shard_without_pad_high));
}
if (wd.stride() == 1) {
int64_t max_pad_low =
get_first_valid_element_offset_on_dilated_shard(0);
bool all_same = true;
for (int64_t shard_ordinal = 1; shard_ordinal < shard_count;
++shard_ordinal) {
int64_t start =
get_first_valid_element_offset_on_dilated_shard(shard_ordinal);
if (start != swd->padding_low()) {
all_same = false;
}
max_pad_low = std::max(max_pad_low, start);
}
if (!all_same) {
auto start_on_padded_input =
start_on_padded_calculations[i].Calculate(partition_ordinals[i],
state_.b);
auto first_window_minus_max_pad_low =
MultiplyAddDivideOffsetCalculation(
wd.base_dilation(), swd->padding_low() - max_pad_low, 1)
.Calculate(start_on_padded_input, state_.b);
auto required_first_window =
MultiplyAddDivideOffsetCalculation(per_shard_window_counts[i], 0,
1)
.Calculate(partition_ordinals[i], state_.b);
dynamic_slice_offset_on_output[i] =
state_.b->AddInstruction(HloInstruction::CreateBinary(
required_first_window->shape(), HloOpcode::kSubtract,
required_first_window, first_window_minus_max_pad_low));
}
swd->set_padding_low(max_pad_low);
} else {
if ((wd.stride() * per_shard_window_counts[i]) % wd.base_dilation() !=
0) {
return std::nullopt;
}
}
}
}
auto get_dynamic_slice_offset_on_output_if_needed =
[&]() -> std::optional<std::vector<HloInstruction*>> {
if (absl::c_all_of(
dynamic_slice_offset_on_output,
[](HloInstruction* offset) { return offset == nullptr; })) {
return std::nullopt;
}
auto zero = state_.b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
for (int64_t i = 0; i < dynamic_slice_offset_on_output.size(); ++i) {
if (dynamic_slice_offset_on_output[i] == nullptr) {
dynamic_slice_offset_on_output[i] = zero;
}
}
return dynamic_slice_offset_on_output;
};
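  // Fallback used when every windowed dimension is (or becomes) replicated:
  // pad the full operand once and dynamic-slice each shard out of it directly.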
auto handle_all_windowed_dimensions_are_replicated = [&]() {
PaddingConfig padding_config;
auto pad_hlo_shape = padded_shape;
for (int64_t i = 0; i < base_shape_.rank(); ++i) {
auto padding_config_dim = padding_config.add_dimensions();
padding_config_dim->set_interior_padding(0);
if (target.tile_assignment().dim(i) == 1 ||
(can_leave_dimension_partitioned[i] && !sharding().IsReplicated())) {
padding_config_dim->set_edge_padding_low(0);
padding_config_dim->set_edge_padding_high(0);
pad_hlo_shape.set_dimensions(i, hlo_->shape().dimensions(i));
} else {
padding_config_dim->set_edge_padding_low(explicit_left_padding[i]);
padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
explicit_left_padding[i] -
base_shape_.dimensions(i));
}
}
auto padded_hlo =
ShapeUtil::Compatible(pad_hlo_shape, base_shape_)
? hlo_
: state_.b->AddInstruction(HloInstruction::CreatePad(
pad_hlo_shape, hlo_, pad_value, padding_config));
auto sharded_input =
state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, padded_hlo, offsets_on_padded_shape,
shard_shape.dimensions()));
return update_cache(WindowedInputShardReturnValue{
sharded_input, shard_window,
get_dynamic_slice_offset_on_output_if_needed()});
};
auto sharding_with_windowed_dims_replicated =
GetShardingReplicatedOnWindowedDimension(target, window);
if (sharding().IsReplicated() ||
(target != sharding() &&
sharding_with_windowed_dims_replicated == sharding())) {
return handle_all_windowed_dimensions_are_replicated();
}
if (target != sharding() &&
sharding_with_windowed_dims_replicated != sharding()) {
return Reshard(target).ReshardAsWindowedInput(window, target, pad_value);
}
if (Product(trimmed_target_sharding_tile_shape) == 1) {
return update_cache(WindowedInputShardReturnValue{
hlo_, shard_window, get_dynamic_slice_offset_on_output_if_needed()});
}
if (target.ReplicateOnLastTileDim()) {
trimmed_target_sharding_tile_shape.push_back(
target.tile_assignment().dimensions().back());
}
std::optional<HloSharding> trimmed_target;
const HloSharding* halo_exchange_target = ⌖
if (trimmed_shards) {
Array<int64_t> trimmed_devices(trimmed_target_sharding_tile_shape);
trimmed_devices.Each([&](absl::Span<const int64_t> indices, int64_t* d) {
std::vector<int64_t> target_indices(indices.begin(), indices.end());
for (int64_t i = 0; i < base_shape_.rank(); ++i) {
const auto& range = trimmed_target_sharding_middle_range[i];
if (range.first >= 0 && indices[i] >= range.first) {
target_indices[i] += range.second;
}
}
*d = target.tile_assignment()(target_indices);
});
trimmed_target = target.ReplicateOnLastTileDim()
? HloSharding::PartialTile(trimmed_devices)
: HloSharding::Tile(trimmed_devices);
halo_exchange_target = &*trimmed_target;
}
HloInstruction* visiting_hlo = hlo_;
if (!dims_needs_pre_masking.empty()) {
std::vector<int64_t> skipped_dims;
for (int dim = 0; dim < base_shape_.rank(); ++dim) {
if (!absl::c_linear_search(dims_needs_pre_masking, dim)) {
skipped_dims.push_back(dim);
}
}
    visiting_hlo = PadWithValueHlo(pad_value, /*left_padded_dims=*/{},
                                   /*skipped_dims=*/skipped_dims);
}
if (trimmed_in_shard) {
std::vector<int64_t> slice_sizes(halo_exchange_base_shape.rank());
for (int64_t i = 0; i < slice_sizes.size(); ++i) {
slice_sizes[i] =
pre_halo_exchange_slice_limits[i] - pre_halo_exchange_slice_starts[i];
}
visiting_hlo = state_.b->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(halo_exchange_base_shape.element_type(),
slice_sizes),
visiting_hlo,
        /*start_indices=*/pre_halo_exchange_slice_starts,
        /*limit_indices=*/pre_halo_exchange_slice_limits,
        /*strides=*/std::vector<int64_t>(halo_exchange_base_shape.rank(), 1)));
}
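  // Exchange halos dimension by dimension. If a required halo would reach
  // beyond the immediate neighbor shard, fall back to replicating the
  // windowed dimensions instead.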
for (int dim = 0; dim < base_shape_.rank(); ++dim) {
int64_t shard_count = halo_exchange_target->tile_assignment().dim(dim);
if (shard_count == 1 || can_leave_dimension_partitioned[dim]) {
continue;
}
int64_t input_shard_size =
CeilOfRatio(halo_exchange_base_shape.dimensions(dim), shard_count);
MultiplyAddDivideOffsetCalculation shard_limit_of_previous_on_padded(
input_shard_size, explicit_left_padding[dim], 1);
OffsetCalculation left_halo_size_functions =
shard_limit_of_previous_on_padded - start_on_padded_calculations[dim];
MultiplyAddDivideOffsetCalculation shard_start_of_next_on_padded(
input_shard_size, input_shard_size + explicit_left_padding[dim], 1);
OffsetCalculation right_halo_size_functions =
limit_on_padded_calculations[dim] - shard_start_of_next_on_padded;
auto resharded = ExchangeHaloAndGetValidData(
visiting_hlo, halo_exchange_base_shape, left_halo_size_functions,
right_halo_size_functions, explicit_left_padding[dim],
padded_shape.dimensions(dim), shard_shape.dimensions(dim), dim,
*halo_exchange_target, offsets_on_padded_shape[dim], pad_value,
partition_ordinals[dim], state_.collective_ops_creator,
state_.next_channel_id, state_.b, mask_invalid_region,
force_mask_in_compact);
if (!resharded) {
VLOG(1) << "ReshardAsWindowedInput failed without replicate first: halo "
"is beyond the neighbor.";
if (sharding_with_windowed_dims_replicated == sharding()) {
return handle_all_windowed_dimensions_are_replicated();
}
return Reshard(sharding_with_windowed_dims_replicated)
.ReshardAsWindowedInput(window, target, pad_value);
}
visiting_hlo = *resharded;
}
return update_cache(WindowedInputShardReturnValue{
visiting_hlo, shard_window,
get_dynamic_slice_offset_on_output_if_needed()});
}
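// Reshards this PartitionedHlo to a fully replicated sharding, consulting the
// per-HLO reshard cache and (when the partitioner's cache_all_gather option is
// enabled) storing the result back into it.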
PartitionedHlo PartitionedHlo::Replicate() const {
auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
if (state_.partitioner->options().cache_all_gather) {
for (auto& entry : cache) {
if (entry.first.IsReplicated()) {
return entry.second;
}
}
}
const HloSharding sharding = hlo_->sharding();
const Shape& shape = hlo_->shape();
CHECK(!shape.IsTuple() && shape.element_type() != TOKEN);
if (sharding.IsReplicated()) {
return *this;
}
for (auto& entry : cache) {
if (entry.first.IsReplicated()) {
return entry.second;
}
}
auto update_cache = [&](PartitionedHlo resharded) {
state_.reshard_cache->per_hlo_cache[resharded.hlo()]
.reshard_cache.insert_or_assign(sharding, *this);
auto& cache = state_.reshard_cache->per_hlo_cache[hlo()].reshard_cache;
if (state_.partitioner->options().cache_all_gather) {
auto [it, _] = cache.insert_or_assign(HloSharding::Replicate(),
std::move(resharded));
return it->second;
}
return resharded;
};
if (sharding.IsTileMaximal()) {
return update_cache(Broadcast());
}
std::vector<int64_t> all_dims(shape.rank());
std::iota(all_dims.begin(), all_dims.end(), 0);
HloInstruction* result = ReplicatePartial(all_dims);
result->set_sharding(HloSharding::Replicate());
return update_cache(PartitionedHlo(result, base_shape_, state_));
}
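// Replicates the local shard along the given `dims`. Dimensions whose shard
// already spans the full base shape are broadcast from one device within each
// group; dimensions whose full size is no more than half the partition count
// use a dynamic-update-slice followed by an all-reduce; the remaining
// dimensions use an all-gather when the collective ops creator provides one.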
HloInstruction* PartitionedHlo::ReplicatePartial(
absl::Span<const int64_t> dims) const {
CHECK(!sharding().IsTileMaximal());
const Shape& shard_shape = hlo()->shape();
Shape final_result_shape = shard_shape;
Shape ag_result_shape = shard_shape;
std::vector<int64_t> broadcast_dims;
std::vector<int64_t> dus_ar_dims;
std::vector<int64_t> ag_dims;
for (int64_t i : dims) {
int64_t partitions = sharding().tile_assignment().dim(i);
if (partitions == 1) {
continue;
}
final_result_shape.set_dimensions(i, base_shape().dimensions(i));
if (base_shape().dimensions(i) == shard_shape.dimensions(i)) {
broadcast_dims.push_back(i);
} else if (base_shape().dimensions(i) <= partitions / 2) {
dus_ar_dims.push_back(i);
} else {
ag_result_shape.set_dimensions(i, base_shape().dimensions(i));
ag_dims.push_back(i);
}
}
HloInstruction* broadcast = hlo_;
if (!broadcast_dims.empty()) {
std::vector<int64_t> other_dims;
for (int64_t i = 0; i < sharding().tile_assignment().num_dimensions();
++i) {
if (!absl::c_linear_search(broadcast_dims, i)) {
other_dims.push_back(i);
}
}
HloSharding original_sharding = sharding();
auto grouped =
hlo_sharding_util::GroupShardingOnDims(original_sharding, other_dims);
std::vector<int64_t> dev_indices(
grouped.sharding.tile_assignment().num_dimensions(), 0);
hlo_->set_sharding(HloSharding::AssignDevice(
grouped.sharding.tile_assignment()(dev_indices)));
auto per_group_partitioner_state = CreatePerGroupPartitioningState(
state(), grouped.device_groups, state().b);
auto partial_replicate_hlo =
PartitionedHlo(hlo_, shard_shape, per_group_partitioner_state)
.Broadcast();
hlo_->set_sharding(original_sharding);
partial_replicate_hlo.hlo()->clear_sharding();
broadcast = partial_replicate_hlo.hlo();
}
if (ag_dims.empty() && dus_ar_dims.empty()) {
return broadcast;
}
HloInstruction* result = nullptr;
if (state_.collective_ops_creator.create_cross_partition_all_gather) {
result = state_.partitioner->AllGatherShards(
state_.b, broadcast, sharding(), state_.next_channel_id, ag_dims,
state_.collective_ops_creator);
}
if (result == nullptr) {
dus_ar_dims.insert(dus_ar_dims.end(), ag_dims.begin(), ag_dims.end());
result = broadcast;
} else {
if (!ShapeUtil::Compatible(result->shape(), ag_result_shape)) {
std::vector<int64_t> start_indices(ag_result_shape.rank(), 0);
std::vector<int64_t> strides(ag_result_shape.rank(), 1);
result = state_.b->AddInstruction(
HloInstruction::CreateSlice(ag_result_shape, result, start_indices,
ag_result_shape.dimensions(), strides));
}
}
if (!dus_ar_dims.empty()) {
auto zero = state_.b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(shard_shape.element_type())));
std::vector<int64_t> masking_dims;
for (int64_t dim : dus_ar_dims) {
if (shard_shape.dimensions(dim) * sharding().tile_assignment().dim(dim) !=
base_shape().dimensions(dim)) {
masking_dims.push_back(dim);
}
}
if (!masking_dims.empty()) {
std::vector<int64_t> skipped_dims;
for (int64_t i = 0; i < base_shape().rank(); ++i) {
if (!absl::c_linear_search(masking_dims, i)) {
skipped_dims.push_back(i);
}
}
result->copy_sharding(hlo_);
result = PartitionedHlo(result, final_result_shape, state_)
                   .PadWithValue(zero, /*left_padded_dims=*/{},
                                 /*skipped_dims=*/skipped_dims)
.hlo();
}
auto zero_bcast = state_.b->AddInstruction(
HloInstruction::CreateBroadcast(final_result_shape, zero, {}));
auto offsets = MakePartitionOffsets(
final_result_shape,
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
sharding(), dus_ar_dims),
state_.partition_id, state_.b, dus_ar_dims);
auto dus =
state_.b->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
final_result_shape, zero_bcast, result, offsets));
HloComputation* reduction =
MakeBinaryAdd(shard_shape.element_type(), state_.module);
result = state_.partitioner->AllReduceAlongShardingDims(
state_.b, dus, sharding(), state_.next_channel_id, dus_ar_dims,
state_.collective_ops_creator, reduction);
}
return result;
}
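// Reshards a tiled sharding into a partially replicated `target` by first
// moving to a compatible intermediate sharding (possibly via
// collective-permute and halo exchange) and then replicating the dimensions
// whose tiling is dropped. Returns std::nullopt when no compatible
// intermediate sharding exists.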
std::optional<PartitionedHlo>
PartitionedHlo::ReshardToPartialReplicateWithAllGather(
const HloSharding& target) const {
if (!target.ReplicateOnLastTileDim()) {
return std::nullopt;
}
auto compatible_sharding =
PartialReplicateReshardCompatibleSharding(target, sharding());
if (!compatible_sharding.has_value()) {
return std::nullopt;
}
const auto& temp_sharding = compatible_sharding.value();
auto partitioned_hlo = *this;
if (CanReshardWithCollectivePermute(sharding(), temp_sharding)) {
partitioned_hlo =
partitioned_hlo.ReshardWithCollectivePermute(temp_sharding);
}
int64_t rank = hlo_->shape().rank();
std::vector<int64_t> replicate_dims;
std::vector<int64_t> replicate_factors;
for (int64_t dim = 0; dim < rank; dim++) {
int64_t replicate_factor = temp_sharding.tile_assignment().dim(dim) /
target.tile_assignment().dim(dim);
if (replicate_factor > 1) {
replicate_dims.emplace_back(dim);
replicate_factors.emplace_back(replicate_factor);
}
}
auto halo_exchange = TileToPartialReplicateHaloExchange(
partitioned_hlo.hlo_, base_shape_, temp_sharding, target, replicate_dims,
partitioned_hlo.state().collective_ops_creator,
partitioned_hlo.state().next_channel_id,
partitioned_hlo.state().partition_id, partitioned_hlo.state().b);
if (!halo_exchange.has_value()) {
return std::nullopt;
}
auto halo_exchange_hlo = halo_exchange.value();
auto sharding_grouped = hlo_sharding_util::GroupShardingOnDims(
temp_sharding, replicate_dims, replicate_factors);
auto per_group_partitioner_state = CreatePerGroupPartitioningState(
partitioned_hlo.state(), sharding_grouped.device_groups,
partitioned_hlo.state().b);
auto base_shape = MakePartitionedShape(base_shape_, target);
auto original_sharding = partitioned_hlo.sharding();
halo_exchange_hlo->set_sharding(sharding_grouped.sharding);
auto partial_replicate_hlo = PartitionedHlo(halo_exchange_hlo, base_shape,
per_group_partitioner_state);
HloInstruction* result =
partial_replicate_hlo.ReplicatePartial(replicate_dims);
partitioned_hlo.hlo()->set_sharding(original_sharding);
result->set_sharding(target);
return PartitionedHlo(result, base_shape_, partitioned_hlo.state());
}
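// The inverse of the all-gather path above: reshards a partially replicated
// sharding to a more tiled `target` by padding to a compatible shape and
// dynamic-slicing each partition's shard out of its replicated copy, followed
// by a collective-permute to the final target when needed.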
std::optional<PartitionedHlo>
PartitionedHlo::ReshardFromPartialReplicateWithDynamicSlice(
const HloSharding& target) const {
if (!sharding().ReplicateOnLastTileDim()) {
return std::nullopt;
}
auto target_compatible_sharding =
PartialReplicateReshardCompatibleSharding(sharding(), target);
if (!target_compatible_sharding.has_value()) {
return std::nullopt;
}
std::vector<int64_t> expand_tile_dims;
std::vector<int64_t> tiling_dim_factors;
int64_t rank = hlo_->shape().rank();
tiling_dim_factors.reserve(target.tile_assignment().num_dimensions());
const auto& temp_target_sharding = target_compatible_sharding.value();
for (int64_t dim = 0; dim < rank; dim++) {
if (temp_target_sharding.tile_assignment().dim(dim) >
sharding().tile_assignment().dim(dim)) {
expand_tile_dims.push_back(dim);
}
tiling_dim_factors.emplace_back(
temp_target_sharding.tile_assignment().dim(dim) /
sharding().tile_assignment().dim(dim));
}
if (target.ReplicateOnLastTileDim()) {
tiling_dim_factors.emplace_back(
target.tile_assignment().dimensions().back());
}
auto padded_hlo = PadFromPartialReplicateShape(
hlo_, base_shape_, sharding(), temp_target_sharding, expand_tile_dims,
state_.collective_ops_creator, state_.next_channel_id,
state_.partition_id, state_.b);
if (!padded_hlo.has_value()) {
return std::nullopt;
}
auto shard_shape = MakePartitionedShape(base_shape_, temp_target_sharding);
auto padded_base_shape = shard_shape;
for (int64_t i = 0; i < padded_base_shape.rank(); ++i) {
padded_base_shape.set_dimensions(
i, padded_base_shape.dimensions(i) *
temp_target_sharding.tile_assignment().dim(i));
}
auto offsets = MakePartitionOffsets(padded_base_shape, temp_target_sharding,
state_.partition_id, state_.b);
auto old_offsets = MakePartitionOffsets(padded_base_shape, sharding(),
state_.partition_id, state_.b);
for (int64_t i = 0; i < offsets.size(); ++i) {
offsets[i] = state_.b->AddInstruction(HloInstruction::CreateBinary(
offsets[i]->shape(), HloOpcode::kSubtract, offsets[i], old_offsets[i]));
}
auto slice = state_.b->AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, padded_hlo.value(), offsets, shard_shape.dimensions()));
slice->set_sharding(temp_target_sharding);
auto result = PartitionedHlo(slice, base_shape_, state_);
if (CanReshardWithCollectivePermute(temp_target_sharding, target)) {
return result.ReshardWithCollectivePermute(target);
}
return result;
}
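// Broadcasts a value owned by a single device to all partitions: every other
// partition contributes zeros and the results are combined with a
// cross-partition all-reduce.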
PartitionedHlo PartitionedHlo::Broadcast() const {
const Shape& shape = hlo_->shape();
const HloSharding& sharding = hlo_->sharding();
CHECK(sharding.HasUniqueDevice());
CHECK(!shape.IsTuple() && shape.element_type() != TOKEN);
auto src_core_id = state_.b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<uint32_t>(sharding.GetUniqueDevice())));
Shape bcast_shape = ShapeUtil::ChangeElementType(shape, PRED);
auto is_src_core = state_.b->AddInstruction(HloInstruction::CreateBroadcast(
bcast_shape,
state_.b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), state_.partition_id, src_core_id,
ComparisonDirection::kEq)),
{}));
auto zero = state_.b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(shape.element_type())));
auto zero_bcast = state_.b->AddInstruction(
HloInstruction::CreateBroadcast(shape, zero, {}));
auto operand = state_.b->AddInstruction(HloInstruction::CreateTernary(
shape, HloOpcode::kSelect, is_src_core, hlo(), zero_bcast));
HloComputation* reduction =
MakeBinaryAdd(shape.element_type(), state_.module);
auto result = state_.collective_ops_creator.create_cross_partition_all_reduce(
state_.b, operand, reduction, {}, NewChannel());
result->set_sharding(HloSharding::Replicate());
return PartitionedHlo(result, base_shape_, state_);
}
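// Reshards by exchanging tiling between (source_dim, target_dim) pairs with
// all-to-all collectives. Each pair is handled in turn: the data is padded,
// reshaped to expose the exchange groups, sent through an all-to-all,
// transposed and reshaped back, then the function recurses on the remaining
// pairs.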
PartitionedHlo PartitionedHlo::ReshardWithAllToAll(
const HloSharding& target,
absl::Span<const std::pair<int64_t, int64_t>> source_target_dims) const {
if (source_target_dims.empty()) {
if (target == sharding()) {
return *this;
}
return ReshardWithCollectivePermute(target);
}
VLOG(5) << "Source: " << sharding().ToString();
VLOG(5) << "Target: " << target.ToString();
int64_t source_dim = source_target_dims[0].first;
int64_t target_dim = source_target_dims[0].second;
const int64_t group_size = sharding().tile_assignment().dim(source_dim) /
sharding().tile_assignment().dim(target_dim);
VLOG(5) << "Group size: " << group_size;
auto temp_target_tile = [&] {
auto& original_tile_assignment = sharding().tile_assignment();
std::vector<int64_t> reshape_tile_dims(
original_tile_assignment.num_dimensions() + 2);
int64_t i = 0;
int64_t added_source_dim = -1;
int64_t added_target_dim = -1;
for (int64_t j = 0; j < original_tile_assignment.num_dimensions(); ++j) {
if (source_dim == j) {
reshape_tile_dims[i] = original_tile_assignment.dim(j) / group_size;
reshape_tile_dims[++i] = group_size;
added_source_dim = i;
} else if (target_dim == j) {
reshape_tile_dims[i] = original_tile_assignment.dim(j);
reshape_tile_dims[++i] = 1;
added_target_dim = i;
} else {
reshape_tile_dims[i] = original_tile_assignment.dim(j);
}
++i;
}
VLOG(5) << "Added target: " << added_target_dim;
VLOG(5) << "Added source: " << added_source_dim;
std::vector<int64_t> xpose_dims(reshape_tile_dims.size());
std::iota(xpose_dims.begin(), xpose_dims.end(), 0);
xpose_dims[added_source_dim] = added_target_dim;
xpose_dims[added_target_dim] = added_source_dim;
auto temp_target_tile =
hlo_sharding_util::TransposeSharding(
HloSharding::Tile(
original_tile_assignment.Reshape(reshape_tile_dims)),
xpose_dims)
.tile_assignment();
VLOG(5) << "Transposed target: " << temp_target_tile.ToString();
std::vector<int64_t> temp_target_tile_dims(
sharding().tile_assignment().dimensions().begin(),
sharding().tile_assignment().dimensions().end());
temp_target_tile_dims[source_dim] =
sharding().tile_assignment().dim(target_dim);
temp_target_tile_dims[target_dim] =
sharding().tile_assignment().dim(source_dim);
return temp_target_tile.Reshape(temp_target_tile_dims);
}();
auto temp_target = target.ReplicateOnLastTileDim()
? HloSharding::PartialTile(temp_target_tile)
: HloSharding::Tile(temp_target_tile);
VLOG(5) << "Temp target sharding: " << temp_target.ToString();
auto padded_shape = hlo_->shape();
auto padded_base_shape = base_shape_;
auto current_base_padded_shape = base_shape_;
padded_base_shape.set_dimensions(
target_dim, RoundUpTo(base_shape_.dimensions(target_dim),
temp_target.tile_assignment().dim(target_dim)));
current_base_padded_shape.set_dimensions(
target_dim, hlo_->shape().dimensions(target_dim) *
sharding().tile_assignment().dim(target_dim));
auto padded_source_base_shape = base_shape_;
auto current_source_base_padded_shape = base_shape_;
padded_source_base_shape.set_dimensions(
source_dim, RoundUpTo(base_shape_.dimensions(source_dim),
temp_target.tile_assignment().dim(source_dim)));
current_source_base_padded_shape.set_dimensions(
source_dim, hlo_->shape().dimensions(source_dim) *
sharding().tile_assignment().dim(source_dim));
VLOG(5) << "Target dim: " << target_dim;
VLOG(5) << "Source dim: " << source_dim;
VLOG(5) << "Original sharded shape: " << hlo_->shape();
VLOG(5) << "Base shape: " << base_shape_.ToString();
VLOG(5) << "Padded base shape: " << padded_base_shape.ToString();
VLOG(5) << "Current padded shape: " << current_base_padded_shape.ToString();
VLOG(5) << "Padded source base shape: "
<< padded_source_base_shape.ToString();
VLOG(5) << "Current source padded shape: "
<< current_source_base_padded_shape.ToString();
VLOG(5) << "Dimension padded target_dim: "
<< hlo_->shape().dimensions(target_dim) *
sharding().tile_assignment().dim(target_dim);
CHECK_GE(padded_base_shape.rank(), current_base_padded_shape.rank());
CHECK_LE(padded_source_base_shape.rank(),
current_source_base_padded_shape.rank());
PaddingConfig pc;
for (int64_t i = 0; i < hlo_->shape().rank(); ++i) {
auto* pd = pc.add_dimensions();
pd->set_edge_padding_low(0);
pd->set_edge_padding_high(padded_base_shape.dimensions(i) -
current_base_padded_shape.dimensions(i));
pd->set_interior_padding(0);
}
PartitionedHlo p_hlo = *this;
VLOG(5) << "Before reshard: " << p_hlo.hlo_->ToString();
HloInstruction* zero = CreateZero(
ShapeUtil::MakeShape(hlo_->shape().element_type(), {}), state_.b);
HloSharding sharding_copy = sharding();
auto padded_phlo =
ReshardDataForPad(zero, pc, p_hlo, sharding_copy, state_.b);
CHECK(padded_phlo.has_value());
VLOG(5) << "Resharded: " << padded_phlo->sharded_input->ToString();
VLOG(5) << "Padded Window: " << padded_phlo->shard_window.DebugString();
HloInstruction* padded_hlo =
PadDataFromWindowReshard(*padded_phlo, zero, state_.b);
VLOG(5) << "Padded data: " << padded_hlo->ToString();
std::vector<std::vector<int64_t>> groups(
temp_target.tile_assignment().num_elements() / group_size);
temp_target.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
int64_t group_id = 0;
for (int64_t dim = 0; dim < indices.size(); ++dim) {
if (dim == target_dim) {
group_id *= temp_target.tile_assignment().dim(dim) / group_size;
group_id += indices[dim] / group_size;
} else {
group_id *= temp_target.tile_assignment().dim(dim);
group_id += indices[dim];
}
}
groups[group_id].push_back(device);
});
HloInstruction* result = nullptr;
std::vector<int64_t> dimensions;
const int64_t rank = base_shape_.rank();
dimensions.reserve(rank + 1);
for (int64_t i = 0; i < rank; ++i) {
if (i == target_dim) {
dimensions.push_back(group_size);
dimensions.push_back(padded_hlo->shape().dimensions(i) / group_size);
} else {
dimensions.push_back(padded_hlo->shape().dimensions(i));
}
}
VLOG(5) << "Target ata shape: "
<< ShapeUtil::MakeShape(base_shape_.element_type(), dimensions)
.ToString();
auto reshape = state_.b->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(base_shape_.element_type(), dimensions),
padded_hlo));
auto all_to_all =
state_.collective_ops_creator.create_cross_partition_all_to_all(
state_.b, {reshape}, groups, (*state_.next_channel_id)++, target_dim);
int64_t new_source_dim =
(target_dim < source_dim) ? source_dim + 1 : source_dim;
std::vector<int64_t> permutation;
for (int64_t i = 0; i < all_to_all->shape().rank(); ++i) {
if (i == target_dim) {
continue;
}
if (i == new_source_dim) {
permutation.push_back(target_dim);
}
permutation.push_back(i);
}
auto transpose = state_.b->AddInstruction(HloInstruction::CreateTranspose(
ShapeInference::InferTransposeShape(all_to_all->shape(), permutation)
.value(),
all_to_all, permutation));
auto new_shape = ShapeInference::InferAllToAllShape(
padded_hlo->shape(), target_dim, source_dim, group_size)
.value();
result = state_.b->AddInstruction(
HloInstruction::CreateReshape(new_shape, transpose));
result->set_sharding(temp_target);
std::vector<int64_t> strides(result->shape().rank(), 1);
std::vector<int64_t> starts(result->shape().rank(), 0);
std::vector<int64_t> limits(result->shape().rank());
for (int64_t i = 0; i < result->shape().rank(); ++i) {
limits[i] = padded_source_base_shape.dimensions(i);
}
auto sliced_phlo = ReshardDataForSlicing(
strides, starts, limits,
PartitionedHlo(result, current_source_base_padded_shape, state_),
temp_target, state_.b);
CHECK(sliced_phlo.has_value());
result = SliceDataFromWindowReshard(*sliced_phlo, strides, base_shape_,
temp_target, state_.b);
result->set_sharding(temp_target);
auto remaining_source_target_dims = source_target_dims;
remaining_source_target_dims.remove_prefix(1);
return PartitionedHlo(result, base_shape_, state_)
.ReshardWithAllToAll(target, remaining_source_target_dims);
}
namespace {
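// Matches reshards that amount to merging or splitting one tiled dimension
// into another. On success, returns the intermediate (split) sharding, the
// sharding to reshard to in the split space, and the dimension being split,
// so the caller can lower the reshard as reshape -> reshard -> reshape.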
std::optional<std::tuple<HloSharding, HloSharding, int64_t>>
PatternMatchMergeOrSplitSharding(const Shape& shape, const Shape& base_shape,
const HloSharding& source,
const HloSharding& target) {
if (!source.IsTiled() || !target.IsTiled()) {
return std::nullopt;
}
if (source.TiledDataRank() != target.TiledDataRank()) {
return std::nullopt;
}
if ((source.HasPartialReplication() ^ target.HasPartialReplication()) ||
(source.HasPartialReplication() &&
source.tile_assignment().dimensions()[source.TiledDataRank()] !=
target.tile_assignment().dimensions()[target.TiledDataRank()])) {
return std::nullopt;
}
std::vector<int64_t> diff_index;
for (int64_t i = 0; i < target.TiledDataRank(); ++i) {
if (source.tile_assignment().dim(i) != target.tile_assignment().dim(i)) {
diff_index.push_back(i);
}
}
if (diff_index.size() < 2) {
return std::nullopt;
}
for (int64_t diff_index_i = 0; diff_index_i < diff_index.size();
++diff_index_i) {
for (int64_t diff_index_j = diff_index_i + 1;
diff_index_j < diff_index.size(); ++diff_index_j) {
int64_t i = diff_index[diff_index_i];
int64_t j = diff_index[diff_index_j];
const std::vector<bool> is_one = {source.tile_assignment().dim(i) == 1,
source.tile_assignment().dim(j) == 1,
target.tile_assignment().dim(i) == 1,
target.tile_assignment().dim(j) == 1};
int64_t new_dim_size;
switch (std::count(is_one.begin(), is_one.end(), true)) {
case 1: {
if (source.tile_assignment().dim(i) *
source.tile_assignment().dim(j) !=
target.tile_assignment().dim(i) *
target.tile_assignment().dim(j)) {
continue;
}
if (source.tile_assignment().dim(i) == 1 ||
target.tile_assignment().dim(i) == 1) {
std::swap(i, j);
}
if (target.tile_assignment().dim(j) == 1) {
if (shape.dimensions(i) % source.tile_assignment().dim(j) != 0) {
continue;
}
new_dim_size = source.tile_assignment().dim(i);
} else {
if (base_shape.dimensions(i) % source.tile_assignment().dim(i) !=
0) {
continue;
}
new_dim_size = target.tile_assignment().dim(i);
}
break;
}
case 0: {
if (source.tile_assignment().dim(i) <
target.tile_assignment().dim(i)) {
std::swap(i, j);
}
if (source.tile_assignment().dim(i) !=
target.tile_assignment().dim(i) *
target.tile_assignment().dim(j)) {
continue;
}
if (base_shape.dimensions(i) % source.tile_assignment().dim(i) != 0) {
continue;
}
new_dim_size = target.tile_assignment().dim(i);
break;
}
default:
continue;
}
auto reshaped_sharding =
hlo_sharding_util::SplitShardingDimension(source, i, new_dim_size);
std::vector<int64_t> dimensions(
reshaped_sharding.tile_assignment().dimensions().begin(),
reshaped_sharding.tile_assignment().dimensions().end());
std::swap(dimensions[i + 1], dimensions[j + (j > i ? 1 : 0)]);
auto target_tile_assignment =
target.tile_assignment().Reshape(dimensions);
auto new_sharding =
source.HasPartialReplication()
? HloSharding::PartialTile(target_tile_assignment,
source.metadata())
: HloSharding::Tile(target_tile_assignment, source.metadata());
VLOG(10) << "Reshaped sharding before: " << reshaped_sharding.ToString();
VLOG(10) << "Reshaped sharding: " << new_sharding.ToString();
return std::make_tuple(std::move(reshaped_sharding),
std::move(new_sharding), i);
}
}
return std::nullopt;
}
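// Matches reshards where one tiled dimension of `source` can be folded into
// the partial replication dimension of `target`; returns the partially
// replicated intermediate sharding for that dimension if found.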
std::optional<HloSharding> PatternMatchPartiallyReplicateDim(
const HloSharding& source, const HloSharding& target) {
if (!target.ReplicateOnLastTileDim()) {
return std::nullopt;
}
const int64_t target_replicated_dim = target.SubgroupReplicationDim();
const int64_t source_replicated_size =
source.HasPartialReplication()
? source.tile_assignment().dim(source.SubgroupReplicationDim())
: 1;
CHECK_NE(target_replicated_dim, -1) << "Expected replicated dim";
for (int i = 0; i < source.TiledDataRank(); ++i) {
if (source.tile_assignment().dim(i) == 1 ||
source.tile_assignment().dim(i) * source_replicated_size !=
target.tile_assignment().dim(target_replicated_dim)) {
continue;
}
auto replicated_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(source, {i});
return replicated_sharding;
}
return std::nullopt;
}
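// Reshapes `to_reshape` so that `dim_to_split` is split into
// (dim / dim_size, dim_size) and assigns `target_sharding` to the result.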
PartitionedHlo SplitReshapeHelper(const PartitionedHlo& to_reshape,
int64_t dim_to_split, int64_t dim_size,
const HloSharding& target_sharding) {
Shape original_shape = to_reshape.hlo()->shape();
std::vector<int64_t> shape_dim(original_shape.dimensions().begin(),
original_shape.dimensions().end());
shape_dim.insert(shape_dim.begin() + dim_to_split + 1, dim_size);
shape_dim[dim_to_split] /= dim_size;
std::vector<int64_t> base_shape_dim(
to_reshape.base_shape().dimensions().begin(),
to_reshape.base_shape().dimensions().end());
base_shape_dim.insert(
base_shape_dim.begin() + dim_to_split + 1,
dim_size * target_sharding.tile_assignment().dim(dim_to_split + 1));
base_shape_dim[dim_to_split] /=
dim_size * target_sharding.tile_assignment().dim(dim_to_split + 1);
Shape shape = ShapeUtil::MakeShape(original_shape.element_type(), shape_dim);
HloInstruction* reshaped_instr = to_reshape.state().b->AddInstruction(
HloInstruction::CreateReshape(shape, to_reshape.hlo()));
reshaped_instr->set_sharding(target_sharding);
return PartitionedHlo{
reshaped_instr,
ShapeUtil::MakeShape(to_reshape.base_shape().element_type(),
base_shape_dim),
to_reshape.state()};
}
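// Reshapes `to_reshape` so that `dim_to_merge` is merged with the dimension
// that follows it and assigns `target_sharding` to the result.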
PartitionedHlo MergeReshapeHelper(const PartitionedHlo& to_reshape,
int64_t dim_to_merge,
const HloSharding& target_sharding) {
Shape original_shape = to_reshape.hlo()->shape();
std::vector<int64_t> shape_dim(original_shape.dimensions().begin(),
original_shape.dimensions().end());
shape_dim[dim_to_merge] *= shape_dim[dim_to_merge + 1];
shape_dim.erase(shape_dim.begin() + dim_to_merge + 1);
std::vector<int64_t> base_shape_dim(
to_reshape.base_shape().dimensions().begin(),
to_reshape.base_shape().dimensions().end());
base_shape_dim[dim_to_merge] *= base_shape_dim[dim_to_merge + 1];
base_shape_dim.erase(base_shape_dim.begin() + dim_to_merge + 1);
Shape shape = ShapeUtil::MakeShape(original_shape.element_type(), shape_dim);
HloInstruction* reshaped_instr = to_reshape.state().b->AddInstruction(
HloInstruction::CreateReshape(shape, to_reshape.hlo()));
reshaped_instr->set_sharding(target_sharding);
return PartitionedHlo(
reshaped_instr,
ShapeUtil::MakeShape(original_shape.element_type(), base_shape_dim),
to_reshape.state());
}
}  // namespace
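// Attempts to lower a reshard with no direct implementation as a sequence of
// simpler reshards, using the pattern matchers above: merge/split of a tiled
// dimension, folding a dimension into partial replication, or transposing a
// partially replicated dimension into place.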
std::optional<PartitionedHlo> PartitionedHlo::TryComplexReshardHandling(
const HloSharding& target) const {
VLOG(5) << "Trying to split complicated reshard: " << sharding().ToString()
<< " to " << target.ToString();
const bool is_source_partially_replicated =
sharding().ReplicateOnLastTileDim();
const bool is_target_partially_replicated = target.ReplicateOnLastTileDim();
if (auto reshape = PatternMatchMergeOrSplitSharding(
this->hlo()->shape(), this->base_shape(), sharding(), target)) {
auto& [before_sharding, new_reshaped_sharding, source_dim] = *reshape;
VLOG(10) << "Matched \"pattern_match_reshape()\": "
<< std::get<0>(*reshape).ToString();
VLOG(10) << "Original shape: " << hlo()->shape().ToString();
VLOG(10) << "Dim to split: " << std::get<1>(*reshape) << " size "
<< sharding().tile_assignment().dim(source_dim);
VLOG(10) << "Before sharding: " << before_sharding.ToString();
PartitionedHlo reshaped = SplitReshapeHelper(
*this, source_dim, this->hlo()->shape().dimensions(source_dim),
before_sharding);
    auto reshard = reshaped.ReshardNoCache(new_reshaped_sharding,
                                           /*pad_value=*/std::nullopt,
                                           /*allow_full_replication=*/false);
if (reshard.sharding() != new_reshaped_sharding) {
return std::nullopt;
}
auto reshaped_sharding = hlo_sharding_util::MergeShardingDimension(
reshard.sharding(), source_dim);
reshaped = MergeReshapeHelper(reshard, source_dim, reshaped_sharding);
if (reshaped.sharding() != target) {
      reshaped = reshaped.ReshardNoCache(target, /*pad_value=*/std::nullopt,
                                         /*allow_full_replication=*/false);
if (reshaped.sharding() != target) {
return std::nullopt;
}
}
return reshaped;
}
if (auto intermediate_target =
PatternMatchPartiallyReplicateDim(sharding(), target)) {
VLOG(5) << "Matched \"pattern_match_partially_replicate_dim()\": "
<< intermediate_target->ToString();
auto intermediate_reshard = Reshard(*intermediate_target);
    auto final_reshard = intermediate_reshard.ReshardNoCache(
        target, /*pad_value=*/std::nullopt, /*allow_full_replication=*/false);
if (final_reshard.sharding() != target) {
return std::nullopt;
}
return final_reshard;
}
if (is_source_partially_replicated && !is_target_partially_replicated) {
const int64_t partial_repl_amount =
sharding().tile_assignment().dimensions().back();
int64_t first_different_dimension = -1;
for (int64_t i = 0; i < target.tile_assignment().num_dimensions(); ++i) {
if (target.tile_assignment().dim(i) !=
sharding().tile_assignment().dim(i) &&
sharding().tile_assignment().dim(i) == 1 &&
target.tile_assignment().dim(i) % partial_repl_amount == 0) {
first_different_dimension = i;
break;
}
}
if (first_different_dimension == -1) {
return std::nullopt;
}
VLOG(5) << "Matched partially replicated to non partially replicated: "
<< sharding().ToString();
std::vector<int64_t> transpose_dims(
sharding().tile_assignment().num_dimensions(), 0);
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
std::swap(transpose_dims[first_different_dimension], transpose_dims.back());
auto intermediate_sharding =
hlo_sharding_util::TransposeSharding(sharding(), transpose_dims);
auto intermediate_reshard = Reshard(intermediate_sharding);
    auto reshard = intermediate_reshard.ReshardNoCache(
        target, /*pad_value=*/std::nullopt, /*allow_full_replication=*/false);
if (reshard.sharding() != target) {
return std::nullopt;
}
return reshard;
}
return std::nullopt;
}
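// Handles reshards between a partially replicated sharding and a fully tiled
// one whose tile assignments line up except for a single dimension, by
// constructing a compatible temporary partially replicated sharding and
// converting with an all-to-all. Returns std::nullopt otherwise.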
std::optional<PartitionedHlo>
PartitionedHlo::ReshardPartialReplicateWithAllToAll(
const HloSharding& target) const {
bool source_is_partial_replicate = sharding().ReplicateOnLastTileDim();
const auto& partial_replicate_sharding =
source_is_partial_replicate ? sharding() : target;
if (!partial_replicate_sharding.ReplicateOnLastTileDim()) {
return std::nullopt;
}
const auto& tile_sharding = source_is_partial_replicate ? target : sharding();
if (tile_sharding.ReplicateOnLastTileDim() || tile_sharding.IsTileMaximal()) {
return std::nullopt;
}
const int num_replicas =
partial_replicate_sharding.tile_assignment().dimensions().back();
if (((tile_sharding.tile_assignment().num_dimensions() + 1) !=
partial_replicate_sharding.tile_assignment().num_dimensions()) ||
(partial_replicate_sharding.tile_assignment().dim(0) != 1)) {
return std::nullopt;
}
int to_replicate_dim = -1;
for (int i = tile_sharding.tile_assignment().num_dimensions() - 1; i >= 0;
--i) {
if (tile_sharding.tile_assignment().dim(i) > 1 &&
(to_replicate_dim == -1)) {
if (tile_sharding.tile_assignment().dim(i) != num_replicas) {
return std::nullopt;
}
to_replicate_dim = i;
}
if (tile_sharding.tile_assignment().dim(i) !=
partial_replicate_sharding.tile_assignment().dim(i + 1)) {
return std::nullopt;
}
}
if (to_replicate_dim == -1) {
return std::nullopt;
}
auto reshape_tile_assignment =
partial_replicate_sharding.tile_assignment().Reshape(
tile_sharding.tile_assignment().dimensions());
if (reshape_tile_assignment != tile_sharding.tile_assignment()) {
return std::nullopt;
}
std::vector<int64_t> tmp_tile_assignment_dimensions(
tile_sharding.tile_assignment().dimensions().begin(),
tile_sharding.tile_assignment().dimensions().end());
tmp_tile_assignment_dimensions[to_replicate_dim] = 1;
tmp_tile_assignment_dimensions.push_back(num_replicas);
auto tmp_tile_assignment =
tile_sharding.tile_assignment().Reshape(tmp_tile_assignment_dimensions);
auto tmp_partial_replicate_sharding =
HloSharding::PartialTile(tmp_tile_assignment);
if (source_is_partial_replicate) {
if (auto src_tgt_dims = GetReshardAllToAllSourceTargetDims(
sharding(), tmp_partial_replicate_sharding)) {
auto partitioned_hlo =
ReshardWithAllToAll(tmp_partial_replicate_sharding, *src_tgt_dims);
return partitioned_hlo.Reshard(target);
}
} else {
auto partitioned_hlo = Reshard(tmp_partial_replicate_sharding);
if (auto src_tgt_dims = GetReshardAllToAllSourceTargetDims(
partitioned_hlo.sharding(), target)) {
return partitioned_hlo.ReshardWithAllToAll(target, *src_tgt_dims);
}
}
return std::nullopt;
}
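// Reshards by sending each shard to the device that owns the same tile index
// under `target` with a collective-permute. If the HLO was created by this
// builder with known broadcast dimensions and the two shardings agree once
// those dimensions are replicated away, a sharding-annotated copy suffices.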
PartitionedHlo PartitionedHlo::ReshardWithCollectivePermute(
const HloSharding& target) const {
CHECK(CanReshardWithCollectivePermute(sharding(), target))
<< sharding().ToString() << " to " << target.ToString();
if (auto broadcast_dims = state_.b->BroadcastDimsForCreatedHlo(hlo())) {
if (!(*broadcast_dims)->empty()) {
std::vector<int64_t> broadcast_dims_vector;
for (int64_t i = 0; i < hlo()->shape().rank(); ++i) {
if ((*broadcast_dims)->contains(i)) {
broadcast_dims_vector.push_back(i);
}
}
if (hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
sharding(), broadcast_dims_vector) ==
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
target, broadcast_dims_vector)) {
auto copy = state_.b->AddInstruction(HloInstruction::CreateUnary(
hlo()->shape(), HloOpcode::kCopy, hlo()));
copy->set_sharding(target);
return PartitionedHlo(copy, base_shape_, state_);
}
}
}
std::vector<std::pair<int64_t, int64_t>> src_dst_pairs;
sharding().tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t src_device) {
int64_t dst_device = target.tile_assignment()(indices);
src_dst_pairs.emplace_back(src_device, dst_device);
});
auto cp =
state_.collective_ops_creator.create_cross_partition_collective_permute(
state_.b, hlo(), src_dst_pairs, (*state_.next_channel_id)++);
cp->set_sharding(target);
return PartitionedHlo(cp, base_shape_, state_);
}
SpmdPartitioningVisitor::SpmdPartitioningVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdLogger* logger,
SpmdPartitionerOptions options, SpmdPartitioner* partitioner,
const CallGraph& call_graph)
: changed_(false),
module_(computation->parent()),
num_partitions_(num_partitions),
num_replicas_(num_replicas),
collective_ops_creator_(collective_ops_creator),
next_channel_id_(next_channel_id),
b_(SpmdBuilder(absl::StrCat(computation->name(), "_spmd"),
                     /*hlo=*/nullptr)),
partition_id_(collective_ops_creator_.create_partition_id(&b_)),
logger_(logger),
options_(std::move(options)),
partitioner_(partitioner),
call_graph_(call_graph) {}
SpmdPartitioningVisitor::SpmdPartitioningVisitor(
const SpmdPartitioningVisitor& src)
: changed_(src.changed_),
module_(src.module_),
num_partitions_(src.num_partitions_),
num_replicas_(src.num_replicas_),
collective_ops_creator_(src.collective_ops_creator_),
next_channel_id_(src.next_channel_id_),
b_(absl::StrCat(module_->entry_computation()->name(), "_spmd"),
         /*hlo=*/nullptr),
partition_id_(collective_ops_creator_.create_partition_id(&b_)),
logger_(src.logger_),
options_(src.options_),
partitioner_(src.partitioner_),
call_graph_(src.call_graph_) {}
std::unique_ptr<SpmdPartitioningVisitor> SpmdPartitioningVisitor::Clone()
const {
return std::make_unique<SpmdPartitioningVisitor>(*this);
}
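// Builds the PartitioningState handed to PartitionedHlo values. When visiting
// an instruction inside a manual subgroup, the state is narrowed to the
// current device group.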
PartitionedHlo::PartitioningState
SpmdPartitioningVisitor::MakePartitioningState() {
PartitionedHlo::PartitioningState state;
state.b = &b_;
state.module = module_;
state.num_replicas = num_replicas_;
state.next_channel_id = next_channel_id_;
state.reshard_cache = &reshard_cache_;
state.partitioner = partitioner_;
if (!device_groups_.empty()) {
state.collective_ops_creator = *visiting_collective_ops_creator_;
state.partition_id = *visiting_partition_id_;
return CreatePerGroupPartitioningState(state, device_groups_, &b_);
} else {
state.collective_ops_creator = collective_ops_creator_;
state.partition_id = partition_id_;
}
return state;
}
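// Expands groups of partition ids into ReplicaGroups over flattened
// (replica, partition) ids, repeating each group once per replica.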
std::vector<ReplicaGroup> SpmdPartitioningVisitor::CreateReplicaGroups(
std::vector<std::vector<int64_t>>& groups) {
std::vector<ReplicaGroup> device_groups;
device_groups.reserve(groups.size() * num_replicas_);
for (int64_t i = 0; i < num_replicas_; ++i) {
for (const auto& group : groups) {
device_groups.emplace_back();
for (int64_t id : group) {
device_groups.back().add_replica_ids(i * num_partitions_ + id);
}
}
}
return device_groups;
}
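// Partitions kCall by partitioning the called computation under the call's
// sharding and re-emitting the call with partitioned operands.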
absl::Status SpmdPartitioningVisitor::HandleCall(HloInstruction* hlo) {
std::vector<HloInstruction*> call_args;
HloComputation* computation = hlo->called_computations()[0];
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
computation->parameter_instruction(i)->set_sharding(
hlo->operand(i)->sharding());
call_args.push_back(GetPartitionedHlo(hlo->operand(i)).hlo());
}
TF_RETURN_IF_ERROR(partitioner_
->PartitionComputation(computation, hlo->sharding(),
next_channel_id_, logger_,
call_graph_)
.status());
SetPartitionedHlo(hlo, [&] {
auto* call = b_.AddInstruction(HloInstruction::CreateCall(
MakePartitionedShape(hlo->shape(), hlo->sharding()), call_args,
hlo->called_computations()[0]));
call->set_raw_backend_config_string(hlo->raw_backend_config_string());
return call;
});
return absl::OkStatus();
}
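// Fallback handler: operands are replicated (or assigned to the unique
// device), the instruction is cloned on the full shape, and the result is
// resharded to the requested output sharding. Side-effecting instructions
// without a unique device cannot be handled this way.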
absl::Status SpmdPartitioningVisitor::DefaultAction(HloInstruction* hlo) {
if (hlo->HasSideEffect() && !hlo->sharding().HasUniqueDevice()) {
return Unimplemented("Side-effect ops cannot be replicated: %s",
hlo->ToString());
}
if (hlo->IsElementwise() && hlo->operand_count() > 0) {
return HandleElementwise(hlo);
}
if (!hlo->sharding().IsTileMaximal()) {
VLOG(1) << "Not partitioned in SPMD mode (DefaultAction):"
<< hlo->ToString();
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
VLOG(1) << " operand " << i
<< " sharding:" << hlo->operand(i)->sharding().ToString();
}
}
const HloSharding base_sharding = [&]() {
if (hlo->sharding().HasUniqueDevice()) {
return HloSharding::AssignDevice(hlo->sharding().GetUniqueDevice());
}
return HloSharding::Replicate();
}();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
HloSharding operand_sharding =
base_sharding.NormalizeTupleSharding(operand->shape());
new_operands.push_back(
GetPartitionedHlo(operand).Reshard(operand_sharding).hlo());
}
auto clone =
b_.AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), new_operands));
clone->set_sharding(base_sharding.NormalizeTupleSharding(clone->shape()));
SetPartitionedHlo(hlo,
PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
.Reshard(hlo->sharding()));
return absl::OkStatus();
}
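// Called before each instruction is visited. Manual and manual-subgroup
// shardings are temporarily rewritten (to a single-device or group-local
// sharding) so the regular handlers can run; the original shardings and
// partitioning state are saved and restored in Postprocess().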
absl::Status SpmdPartitioningVisitor::Preprocess(HloInstruction* hlo) {
visiting_hlo_ = hlo;
b_.set_visiting_hlo(hlo);
auto manual_to_onedevice = [&](HloOpcode opcode, const Shape& shape,
const HloSharding& sharding) {
if (sharding.IsTuple()) {
std::vector<HloSharding> subshardings = sharding.tuple_elements();
for (HloSharding& subsharding : subshardings) {
if (subsharding.IsManual() && opcode != HloOpcode::kCustomCall) {
subsharding = HloSharding::AssignDevice(0);
}
}
return HloSharding::Tuple(shape, subshardings);
}
if (sharding.IsManual() && opcode != HloOpcode::kCustomCall &&
opcode != HloOpcode::kPartitionId) {
return HloSharding::AssignDevice(0);
}
return sharding;
};
if (hlo->opcode() != HloOpcode::kConditional &&
hlo->opcode() != HloOpcode::kTuple &&
hlo->opcode() != HloOpcode::kParameter &&
hlo->opcode() != HloOpcode::kWhile && hlo->opcode() != HloOpcode::kRng &&
hlo->opcode() != HloOpcode::kOutfeed &&
hlo->opcode() != HloOpcode::kAllReduce &&
hlo->opcode() != HloOpcode::kCall) {
const bool has_manual_sharding =
hlo->sharding().IsManual() ||
(hlo->sharding().IsTuple() &&
absl::c_any_of(
hlo->sharding().tuple_elements(),
[](const HloSharding& sharding) { return sharding.IsManual(); }));
if (has_manual_sharding && !hlo->IsCustomCall("SPMDFullToShardShape")) {
visiting_hlo_sharding_ = hlo->sharding();
auto get_sharding_shape = [](const HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kOutfeed) {
return hlo->shape();
}
std::vector<Shape> operand_shapes(hlo->operand_count());
for (int i = 0; i < hlo->operand_count(); ++i) {
operand_shapes[i] = hlo->operand(i)->shape();
}
return ShapeUtil::MakeTupleShape(operand_shapes);
};
hlo->set_sharding(manual_to_onedevice(
hlo->opcode(), get_sharding_shape(hlo), *visiting_hlo_sharding_));
visiting_hlo_operand_shardings_.reserve(hlo->operand_count());
for (HloInstruction* operand : hlo->unique_operands()) {
visiting_hlo_operand_shardings_.push_back(operand->sharding());
operand->set_sharding(manual_to_onedevice(
hlo->opcode(), get_sharding_shape(operand), operand->sharding()));
GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
}
} else {
const bool has_manual_subgroup =
hlo->sharding().IsManualSubgroup() ||
(hlo->sharding().IsTuple() &&
absl::c_any_of(hlo->sharding().tuple_elements(),
[](const HloSharding& sharding) {
return sharding.IsManualSubgroup();
}));
if (has_manual_subgroup && !hlo->IsCustomCall("SPMDFullToShardShape") &&
!hlo->IsCustomCall("SPMDShardToFullShape") &&
hlo->opcode() != HloOpcode::kGetTupleElement) {
auto get_grouped_sharding =
[&](const HloSharding& sharding, const Shape& shape,
const GroupedSharding* ref =
nullptr) -> absl::StatusOr<GroupedSharding> {
if (!sharding.IsTuple()) {
GroupedSharding grouped =
hlo_sharding_util::GetManualSubgroupSharding(sharding);
if (ref != nullptr) {
auto aligned =
AlignGroupsWithIfCompatible(std::move(grouped), *ref);
TF_RET_CHECK(aligned.has_value())
<< "Incompatible manual sharding at " << hlo->ToString();
return *aligned;
}
return grouped;
}
std::vector<HloSharding> elements;
elements.reserve(sharding.tuple_elements().size());
CHECK(!sharding.tuple_elements().empty());
GroupedSharding grouped0 =
hlo_sharding_util::GetManualSubgroupSharding(
sharding.tuple_elements()[0]);
if (ref != nullptr) {
auto aligned =
AlignGroupsWithIfCompatible(std::move(grouped0), *ref);
TF_RET_CHECK(aligned.has_value())
<< "Incompatible manual sharding at " << hlo->ToString();
grouped0 = std::move(*aligned);
}
elements.push_back(std::move(grouped0.sharding));
for (int64_t i = 1; i < sharding.tuple_elements().size(); ++i) {
auto grouped_i = AlignGroupsWithIfCompatible(
hlo_sharding_util::GetManualSubgroupSharding(
sharding.tuple_elements()[i]),
grouped0);
TF_RET_CHECK(grouped_i.has_value())
<< "Incompatible manual sharding between tuple elements: "
<< hlo->ToString();
elements.push_back(std::move(grouped_i->sharding));
}
grouped0.sharding = HloSharding::Tuple(shape, elements);
return grouped0;
};
TF_ASSIGN_OR_RETURN(
auto group_sharding,
get_grouped_sharding(hlo->sharding(), hlo->shape()));
visiting_hlo_sharding_ = hlo->sharding();
hlo->set_sharding(group_sharding.sharding);
device_groups_ = group_sharding.device_groups;
visiting_num_partitions_ = num_partitions_;
num_partitions_ = num_partitions_ / group_sharding.device_groups.size();
visiting_partition_id_ = partition_id_;
visiting_collective_ops_creator_ = std::move(collective_ops_creator_);
auto grouped_state = MakePartitioningState();
collective_ops_creator_ =
std::move(grouped_state.collective_ops_creator);
partition_id_ = grouped_state.partition_id;
visiting_hlo_operand_shardings_.reserve(hlo->operand_count());
visiting_state_.reserve(hlo->operand_count());
for (HloInstruction* operand : hlo->unique_operands()) {
visiting_hlo_operand_shardings_.push_back(operand->sharding());
auto old_state = GetPartitionedHlo(operand).state();
visiting_state_.push_back(old_state);
if (operand->shape().IsArray() && operand->IsConstant() &&
operand->shape().rank() == 0 &&
!operand->sharding().IsManualSubgroup()) {
continue;
}
TF_ASSIGN_OR_RETURN(
auto op_group_sharding,
get_grouped_sharding(operand->sharding(), operand->shape(),
&group_sharding));
operand->set_sharding(op_group_sharding.sharding);
GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
auto group_state = CreatePerGroupPartitioningState(
old_state, op_group_sharding.device_groups, &b_);
GetPartitionedHlo(operand).set_state(group_state);
}
}
}
}
return absl::OkStatus();
}
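// Called after each instruction is visited: records the derived instructions
// with the logger and restores any shardings, device groups, and partitioning
// state that Preprocess() swapped out for manual sharding handling.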
absl::Status SpmdPartitioningVisitor::Postprocess(HloInstruction* hlo) {
logger_->RegisterLogEntry(hlo, b_.derived_instructions(hlo));
visiting_hlo_ = nullptr;
b_.set_visiting_hlo(nullptr);
if (visiting_hlo_sharding_) {
hlo->set_sharding(*visiting_hlo_sharding_);
GetPartitionedHlo(hlo).hlo()->set_sharding(*visiting_hlo_sharding_);
int64_t i = 0;
for (HloInstruction* operand : hlo->unique_operands()) {
operand->set_sharding(visiting_hlo_operand_shardings_[i++]);
GetPartitionedHlo(operand).hlo()->copy_sharding(operand);
}
visiting_hlo_sharding_.reset();
visiting_hlo_operand_shardings_.clear();
}
if (!device_groups_.empty()) {
device_groups_.clear();
num_partitions_ = *visiting_num_partitions_;
visiting_num_partitions_.reset();
collective_ops_creator_ = *visiting_collective_ops_creator_;
visiting_collective_ops_creator_.reset();
partition_id_ = *visiting_partition_id_;
visiting_partition_id_.reset();
GetPartitionedHlo(hlo).set_state(MakePartitioningState());
}
if (!visiting_state_.empty()) {
int64_t i = 0;
for (const HloInstruction* operand : hlo->unique_operands()) {
GetPartitionedHlo(operand).set_state(std::move(visiting_state_[i++]));
}
visiting_state_.clear();
}
return absl::OkStatus();
}
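// Elementwise ops: reshard every operand to the output sharding and clone the
// op on the per-shard shape.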
absl::Status SpmdPartitioningVisitor::HandleElementwise(HloInstruction* hlo) {
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
new_operands.push_back(
GetPartitionedHlo(operand).Reshard(hlo->sharding()).hlo());
}
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(hlo->shape(), hlo->sharding()), new_operands));
});
return absl::OkStatus();
}
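// Concatenate along an unsharded dimension simply clones the op on per-shard
// shapes. When the concatenate dimension is sharded, each operand is written
// into a zero temporary with dynamic-update-slice at its global offset, the
// temporaries are combined with an all-reduce across the groups sharding that
// dimension, and the shard is dynamic-sliced back out.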
absl::Status SpmdPartitioningVisitor::HandleConcatenate(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
const Shape shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
const int64_t dimension = hlo->concatenate_dimension();
if (sharding.tile_assignment().dim(dimension) == 1) {
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
new_operands.push_back(
GetPartitionedHlo(operand).Reshard(sharding).hlo());
}
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(
hlo->CloneWithNewOperands(shard_shape, new_operands));
});
return absl::OkStatus();
}
auto temp_output_shape = MakePartitionedShape(hlo->shape(), sharding);
auto last_operand_padded_shape =
MakePartitionedShape(hlo->operands().back()->shape(), sharding);
int last_operand_padding =
last_operand_padded_shape.dimensions(dimension) *
sharding.tile_assignment().dim(dimension) -
hlo->operands().back()->shape().dimensions(dimension);
int temp_output_padding = temp_output_shape.dimensions(dimension) *
sharding.tile_assignment().dim(dimension) -
hlo->shape().dimensions(dimension);
int padding_for_last_operand =
last_operand_padding < temp_output_padding
? 0
: last_operand_padding - temp_output_padding;
temp_output_shape.set_dimensions(
dimension, temp_output_shape.dimensions(dimension) *
sharding.tile_assignment().dim(dimension) +
padding_for_last_operand);
auto temp_output = CreateZero(temp_output_shape, &b_);
int64_t offset = 0;
auto state = MakePartitioningState();
for (HloInstruction* operand : hlo->operands()) {
auto spmd_operand =
GetPartitionedHlo(operand).Reshard(sharding).PadWithZero().hlo();
std::vector<HloInstruction*> start_indices(
hlo->shape().rank(), b_.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(S32))));
start_indices[dimension] =
MultiplyAddDivideOffsetCalculation(
spmd_operand->shape().dimensions(dimension), offset, 1)
.Calculate(MakeTiledPartitionOrdinals(sharding, state.partition_id,
&b_)[dimension],
&b_);
temp_output = b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
temp_output_shape, temp_output, spmd_operand, start_indices));
offset += operand->shape().dimensions(dimension);
}
std::vector<int64_t> non_concat_dims;
non_concat_dims.reserve(hlo->shape().rank() - 1);
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (i != dimension) {
non_concat_dims.push_back(i);
}
}
auto grouped =
hlo_sharding_util::GroupShardingOnDims(sharding, non_concat_dims);
auto per_group_partitioner_state =
CreatePerGroupPartitioningState(state, grouped.device_groups, &b_);
auto all_reduce = per_group_partitioner_state.collective_ops_creator
.create_cross_partition_all_reduce(
&b_, temp_output,
MakeBinaryAdd(hlo->shape().element_type(), module_),
{}, NewChannel());
SetPartitionedHlo(hlo, [&] {
auto start_indices = MakeTiledPartitionOrdinals(
grouped.sharding, per_group_partitioner_state.partition_id, &b_);
start_indices[dimension] = MultiplyAddDivideOffsetCalculation(
shard_shape.dimensions(dimension), 0, 1)
.Calculate(start_indices[dimension], &b_);
return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, all_reduce, start_indices, shard_shape.dimensions()));
});
return absl::OkStatus();
}
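// Slice: reshard the operand as a windowed input matching the slice and cut
// the per-shard result out of it; falls back to DefaultAction when the data
// cannot be resharded for slicing.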
absl::Status SpmdPartitioningVisitor::HandleSlice(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
auto operand = GetPartitionedHlo(hlo->operand(0)).Reshard(sharding);
auto reshard_operand =
ReshardDataForSlicing(hlo->slice_strides(), hlo->slice_starts(),
hlo->slice_limits(), operand, sharding, &b_);
if (!reshard_operand.has_value()) {
return DefaultAction(hlo);
}
TF_RET_CHECK(!reshard_operand->dynamic_slice_index_on_output.has_value());
HloInstruction* final_operand = SliceDataFromWindowReshard(
*reshard_operand, hlo->slice_strides(), hlo->shape(), sharding, &b_);
SetPartitionedHlo(hlo, [&] {
if (final_operand != reshard_operand->sharded_input) {
return final_operand;
}
return b_.AddInstruction(HloInstruction::CreateUnary(
final_operand->shape(), HloOpcode::kCopy, final_operand));
});
return absl::OkStatus();
}
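// Sort: handles unique-device sorts, the TopK pattern, and sorts whose sort
// dimension is sharded (by moving the sharding off the sort dimension).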
absl::Status SpmdPartitioningVisitor::HandleSort(HloInstruction* hlo) {
HloSharding sharding = hlo->sharding();
int64_t input_count = 1;
if (hlo->shape().IsTuple()) {
input_count = hlo->shape().tuple_shapes_size();
CHECK_GT(input_count, 0);
}
if (sharding.HasUniqueDevice()) {
std::vector<HloInstruction*> new_operands(input_count, nullptr);
for (int64_t i = 0; i != input_count; ++i) {
HloSharding subsharding =
hlo->sharding().IsTuple()
? hlo->sharding().GetSubSharding(hlo->shape(), {i})
: hlo->sharding();
CHECK(!subsharding.IsTuple() && subsharding.HasUniqueDevice());
new_operands[i] =
GetPartitionedHlo(hlo->operand(i)).Reshard(subsharding).hlo();
}
auto clone = b_.AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands));
clone->set_sharding(sharding);
SetPartitionedHlo(
hlo, PartitionedHlo(clone, hlo->shape(), MakePartitioningState()));
return absl::OkStatus();
}
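  // Special case: the sort implements TopK and the sort dimension is
  // partitioned. Compute a per-partition top-k on padded shards, slice the
  // first k values and indices, replicate them, and run a final replicated
  // sort to produce the global result.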
auto k = GetKValueInTopKWhenPartitionSortDim(hlo);
if (k.has_value()) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(hlo);
const int64_t sort_dim = sort->sort_dimension();
auto input = hlo->operand(0);
auto index = hlo->operand(1);
const HloSharding& input_sharding = input->sharding();
const int64_t partition_count =
input_sharding.tile_assignment().dim(sort_dim);
const int64_t input_size = input->shape().dimensions(sort_dim);
const auto element_type = input->shape().element_type();
const auto index_type = index->shape().element_type();
auto partitioned_input = GetPartitionedHlo(input).PadWithValue(
CreateFirstWithType(element_type, &b_));
auto partitioned_index =
GetPartitionedHlo(index)
.Reshard(input_sharding)
.PadWithValue(CreateLastWithType(index_type, &b_));
std::vector<int64_t> replicated_dimensions(
input->shape().dimensions().begin(), input->shape().dimensions().end());
replicated_dimensions[sort_dim] = RoundUpTo(input_size, partition_count);
const Shape replicated_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(element_type, replicated_dimensions),
ShapeUtil::MakeShape(index_type, replicated_dimensions)});
auto topk_sharding =
input_sharding.GetTupleSharding(replicated_shape).value();
auto shard_shape = MakePartitionedShape(replicated_shape, topk_sharding);
auto topk = b_.AddInstruction(hlo->CloneWithNewOperands(
shard_shape, {partitioned_input.hlo(), partitioned_index.hlo()}));
HloInstruction* value_gte =
b_.AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(0), topk, 0));
HloInstruction* index_gte =
b_.AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(1), topk, 1));
replicated_dimensions[sort_dim] = k.value() * partition_count;
auto slice_input = SliceFirstK(value_gte, &b_, sort_dim, k.value());
slice_input->set_sharding(input_sharding);
PartitionedHlo partitioned_slice_input(
slice_input, ShapeUtil::MakeShape(element_type, replicated_dimensions),
MakePartitioningState());
auto replicated_slice_input =
partitioned_slice_input.Reshard(HloSharding::Replicate()).hlo();
auto slice_index = SliceFirstK(index_gte, &b_, sort_dim, k.value());
slice_index->set_sharding(input_sharding);
PartitionedHlo partitioned_slice_index(
slice_index, ShapeUtil::MakeShape(index_type, replicated_dimensions),
MakePartitioningState());
auto replicated_slice_index =
partitioned_slice_index.Reshard(HloSharding::Replicate()).hlo();
const Shape final_topk_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(element_type, replicated_dimensions),
ShapeUtil::MakeShape(index_type, replicated_dimensions)});
HloInstruction* final_sort = b_.AddInstruction(HloInstruction::CreateSort(
final_topk_shape, sort_dim,
{replicated_slice_input, replicated_slice_index}, sort->to_apply(),
sort->is_stable()));
final_sort->set_sharding(
HloSharding::Replicate().GetTupleSharding(final_sort->shape()).value());
PartitionedHlo replicated_sort(final_sort, final_sort->shape(),
MakePartitioningState());
SetPartitionedHlo(hlo, replicated_sort.Reshard(hlo->sharding()));
return absl::OkStatus();
}
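  // General case: if the sort dimension is sharded and all tuple elements
  // share the same sharding, move the sharding to a free dimension when one
  // exists, otherwise partially replicate along the sort dimension, then sort
  // under the new sharding and reshard the result back.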
auto sort = DynCast<HloSortInstruction>(hlo);
auto sort_dim = sort->sort_dimension();
VLOG(2) << "sort dim: " << sort_dim;
auto cur_sharding = sharding;
bool same_subsharding = true;
if (sharding.IsTuple()) {
cur_sharding = sharding.GetSubSharding(hlo->shape(), {0});
for (int64_t i = 1; i != input_count; ++i) {
if (cur_sharding != hlo->sharding().GetSubSharding(hlo->shape(), {i})) {
same_subsharding = false;
break;
}
}
}
auto subshape = hlo->operand(0)->shape();
if (subshape.rank() > 1 && same_subsharding && cur_sharding.IsTiled() &&
!cur_sharding.IsTileMaximal() &&
cur_sharding.tile_assignment().dim(sort_dim) != 1) {
std::vector<int64_t> tile_assignment_dims(
cur_sharding.tile_assignment().dimensions().begin(),
cur_sharding.tile_assignment().dimensions().end());
int64_t picked_dim = -1;
int64_t first_nonsort_nonsharded_dim = -1;
auto nshards = tile_assignment_dims[sort_dim];
for (int64_t dim = 0; dim < subshape.rank(); ++dim) {
if (dim == sort_dim || tile_assignment_dims[dim] != 1 ||
subshape.dimensions(dim) == 1) {
continue;
}
if (first_nonsort_nonsharded_dim == -1) {
first_nonsort_nonsharded_dim = dim;
}
if (subshape.dimensions(dim) % nshards != 0) {
continue;
}
picked_dim = dim;
break;
}
if (picked_dim == -1) {
picked_dim = first_nonsort_nonsharded_dim;
}
std::vector<HloInstruction*> new_operands;
std::vector<HloSharding> new_shardings;
std::optional<HloSharding> new_output_sharding;
if (picked_dim != -1) {
VLOG(2) << "Sort partitioning - picked target dimension to move the "
"sharding: "
<< picked_dim;
CHECK_NE(picked_dim, -1)
<< "Sort partitioning - sharding cannot exist in the sort dimension "
"if "
"there are no free dimensions to move it into";
std::vector<int64_t> permutation(
cur_sharding.tile_assignment().dimensions().begin(),
cur_sharding.tile_assignment().dimensions().end());
absl::c_iota(permutation, 0);
std::swap(permutation[sort_dim], permutation[picked_dim]);
auto new_sharding =
hlo_sharding_util::TransposeSharding(cur_sharding, permutation);
VLOG(2) << "Sort partitioning - new sharding: "
<< new_sharding.ToString();
for (auto& operand : hlo->operands()) {
new_operands.push_back(
GetPartitionedHlo(operand).Reshard(new_sharding).hlo());
new_shardings.push_back(new_sharding);
}
new_output_sharding = new_sharding;
if (sharding.IsTuple()) {
new_output_sharding = HloSharding::Tuple(sort->shape(), new_shardings);
}
} else {
auto new_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(cur_sharding,
{sort_dim});
for (auto& operand : hlo->operands()) {
new_operands.push_back(
GetPartitionedHlo(operand).Reshard(new_sharding).hlo());
new_shardings.push_back(new_sharding);
}
new_output_sharding = new_sharding;
if (sharding.IsTuple()) {
new_output_sharding = HloSharding::Tuple(sort->shape(), new_shardings);
}
}
auto final_sort = b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(sort->shape(), *new_output_sharding),
new_operands));
final_sort->set_sharding(*new_output_sharding);
PartitionedHlo psort(final_sort, sort->shape(), MakePartitioningState());
SetPartitionedHlo(sort, psort.Reshard(sort->sharding()));
return absl::OkStatus();
}
if (hlo->shape().IsTuple()) {
if (hlo->shape().tuple_shapes_size() == 0) {
return DefaultAction(hlo);
}
sharding = hlo->sharding().GetSubSharding(hlo->shape(), {0});
for (int64_t i = 1; i < hlo->operand_count(); ++i) {
if (sharding != hlo->sharding().GetSubSharding(hlo->shape(), {i})) {
return DefaultAction(hlo);
}
}
}
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
for (int64_t dim : hlo->dimensions()) {
if (sharding.tile_assignment().dim(dim) > 1) {
return DefaultAction(hlo);
}
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
new_operands.push_back(GetPartitionedHlo(operand).Reshard(sharding).hlo());
}
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(hlo->shape(), hlo->sharding()), new_operands));
});
return absl::OkStatus();
}
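// Transpose: reshard the operand with the inverse-permuted output sharding and
// clone the transpose on the per-shard shape.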
absl::Status SpmdPartitioningVisitor::HandleTranspose(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
std::vector<int64_t> inverse_dimensions(hlo->shape().rank());
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
inverse_dimensions[hlo->dimensions(i)] = i;
}
auto desired_operand_sharding =
hlo_sharding_util::TransposeSharding(sharding, inverse_dimensions);
auto operand = GetPartitionedHlo(hlo->operand(0))
.Reshard(desired_operand_sharding)
.hlo();
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(hlo->shape(), hlo->sharding()), {operand}));
});
return absl::OkStatus();
}
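// Reshape: first try to propagate the output sharding (or the operand
// sharding) directly through the reshape; otherwise fall back to the
// split/merge handling of a single sharded dimension below.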
absl::Status SpmdPartitioningVisitor::HandleReshape(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
auto operand = GetPartitionedHlo(hlo->operand(0));
auto desired_operand = [&](const HloSharding& output_sharding)
-> std::optional<HloInstruction*> {
std::optional<HloSharding> desired_operand_sharding =
hlo_sharding_util::ReshapeSharding(
hlo->shape(), hlo->operand(0)->shape(), output_sharding);
if (desired_operand_sharding.has_value() &&
output_sharding.NumTiles() == desired_operand_sharding->NumTiles()) {
return b_.AddInstruction(hlo->CloneWithNewOperands(
MakePartitionedShape(hlo->shape(), output_sharding),
{operand.Reshard(*desired_operand_sharding).hlo()}));
}
return std::nullopt;
};
if (auto operand_hlo = desired_operand(hlo->sharding())) {
SetPartitionedHlo(hlo, [&] { return *operand_hlo; });
return absl::OkStatus();
}
std::optional<HloSharding> desired_output_sharding =
hlo_sharding_util::ReshapeSharding(hlo->operand(0)->shape(), hlo->shape(),
operand.sharding());
if (desired_output_sharding.has_value()) {
if (auto operand_hlo = desired_operand(*desired_output_sharding)) {
(*operand_hlo)->set_sharding(*desired_output_sharding);
SetPartitionedHlo(hlo, [&] {
return PartitionedHlo(*operand_hlo, hlo->shape(),
MakePartitioningState())
.Reshard(hlo->sharding())
.hlo();
});
return absl::OkStatus();
}
}
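  // shard_reshape handles reshapes where exactly one dimension is sharded and
  // the reshape either splits or merges that dimension; anything else is
  // handled by replicating the operand, reshaping, and resharding the result.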
auto shard_reshape =
[](PartitionedHlo& operand, const HloSharding& sharding,
const Shape& base_shape) -> absl::StatusOr<HloInstruction*> {
auto replicate = [&] {
HloInstruction* rep = operand.Replicate().hlo();
HloInstruction* reshape = operand.state().b->AddInstruction(
HloInstruction::CreateReshape(base_shape, rep));
reshape->set_sharding(HloSharding::Replicate());
return PartitionedHlo(reshape, base_shape, operand.state())
.Reshard(sharding)
.hlo();
};
if (operand.sharding().NumTiles() != sharding.NumTiles()) {
return replicate();
}
auto maybe_input_sharded_dim = UniqueTiledDim(operand.sharding());
auto maybe_output_sharded_dim = UniqueTiledDim(sharding);
if (!maybe_input_sharded_dim || !maybe_output_sharded_dim) {
return replicate();
}
int64_t input_sharded_dim = *maybe_input_sharded_dim;
int64_t output_sharded_dim = *maybe_output_sharded_dim;
int64_t input_major_dims_size = 1;
for (int64_t i = 0; i < input_sharded_dim; ++i) {
input_major_dims_size *= operand.base_shape().dimensions(i);
}
int64_t output_major_dims_size = 1;
for (int64_t i = 0; i < output_sharded_dim; ++i) {
output_major_dims_size *= base_shape.dimensions(i);
}
if (input_major_dims_size != output_major_dims_size) {
return replicate();
}
auto new_input_tile_assignment = sharding.tile_assignment().Reshape(
operand.sharding().tile_assignment().dimensions());
auto aligned_sharding =
sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_input_tile_assignment)
: HloSharding::Tile(new_input_tile_assignment);
operand = operand.Reshard(aligned_sharding);
auto replication_count =
sharding.ReplicateOnLastTileDim()
? sharding.tile_assignment().dimensions().back()
: 1;
int64_t input_dim_size = operand.base_shape().dimensions(input_sharded_dim);
int64_t output_dim_size = base_shape.dimensions(output_sharded_dim);
auto input_shard_shape =
MakePartitionedShape(operand.base_shape(), operand.sharding());
auto output_shard_shape = MakePartitionedShape(base_shape, sharding);
if (input_dim_size % output_dim_size == 0) {
int64_t split_factor = input_dim_size / output_dim_size;
int64_t output_shard_size =
output_shard_shape.dimensions(output_sharded_dim);
Window window;
for (int64_t i = 0; i < base_shape.rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_base_dilation(1);
dim->set_padding_low(0);
if (i == input_sharded_dim) {
dim->set_padding_high(output_shard_size * split_factor *
sharding.tile_assignment().num_elements() /
replication_count -
input_dim_size);
} else {
dim->set_padding_high(0);
}
}
auto reshard_operand = operand.ReshardAsWindowedInput(
window, operand.sharding(),
CreateZero(ShapeUtil::MakeShape(base_shape.element_type(), {}),
operand.state().b),
false);
if (!reshard_operand.has_value()) {
return replicate();
}
TF_RET_CHECK(!reshard_operand->dynamic_slice_index_on_output.has_value());
CHECK_EQ(
reshard_operand->sharded_input->shape().dimensions(input_sharded_dim),
output_shard_size * split_factor);
return operand.state().b->AddInstruction(HloInstruction::CreateReshape(
output_shard_shape, reshard_operand->sharded_input));
} else if (output_dim_size % input_dim_size == 0) {
int64_t merge_factor = output_dim_size / input_dim_size;
auto tmp_shard_shape = output_shard_shape;
tmp_shard_shape.set_dimensions(
output_sharded_dim,
input_shard_shape.dimensions(input_sharded_dim) * merge_factor);
auto tmp_reshape = operand.state().b->AddInstruction(
HloInstruction::CreateReshape(tmp_shard_shape, operand.hlo()));
tmp_reshape->set_sharding(sharding);
auto tmp_full_shape = tmp_shard_shape;
tmp_full_shape.set_dimensions(
output_sharded_dim, tmp_shard_shape.dimensions(output_sharded_dim) *
sharding.tile_assignment().num_elements() /
replication_count);
auto tmp_output =
PartitionedHlo(tmp_reshape, tmp_full_shape, operand.state());
Window window;
for (int64_t i = 0; i < tmp_shard_shape.rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_base_dilation(1);
dim->set_padding_low(0);
if (i == output_sharded_dim) {
dim->set_padding_high(output_dim_size -
tmp_shard_shape.dimensions(output_sharded_dim) *
sharding.tile_assignment().num_elements() /
replication_count);
} else {
dim->set_padding_high(0);
}
}
auto reshard_output = tmp_output.ReshardAsWindowedInput(
window, sharding,
CreateZero(ShapeUtil::MakeShape(base_shape.element_type(), {}),
operand.state().b),
false);
if (!reshard_output.has_value()) {
return replicate();
}
TF_RET_CHECK(!reshard_output->dynamic_slice_index_on_output.has_value());
CHECK_EQ(
reshard_output->sharded_input->shape().dimensions(output_sharded_dim),
output_shard_shape.dimensions(output_sharded_dim));
return reshard_output->sharded_input;
}
return replicate();
};
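  // recursive_shard propagates the operand sharding through the reshape;
  // dimensions that the sharding passes through are grouped on both sides and
  // the reshape is partitioned recursively within each group, with
  // shard_reshape as the base case.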
std::function<absl::StatusOr<HloInstruction*>(
PartitionedHlo&, const HloSharding&, const Shape&)>
recursive_shard =
[&](PartitionedHlo& operand, const HloSharding& sharding,
const Shape& base_shape) -> absl::StatusOr<HloInstruction*> {
const Shape& operand_base_shape = operand.base_shape();
HloSharding propagated = hlo_sharding_util::PropagateShardingThroughReshape(
operand_base_shape, base_shape, operand.sharding());
if (propagated.IsTiled()) {
auto operand_propagated_back = hlo_sharding_util::ReshapeSharding(
base_shape, operand_base_shape, propagated);
std::vector<int64_t> operand_group_dims;
if (!operand_propagated_back.has_value()) {
return shard_reshape(operand, sharding, base_shape);
}
CHECK(operand_propagated_back->IsTiled());
Shape inner_operand_base_shape = operand_base_shape;
for (int64_t i = 0; i < operand_base_shape.rank(); ++i) {
if (operand_propagated_back->tile_assignment().dim(i) > 1) {
operand_group_dims.push_back(i);
inner_operand_base_shape.set_dimensions(
i, operand.hlo()->shape().dimensions(i));
}
}
Shape inner_base_shape = base_shape;
bool use_original_output_sharding =
sharding.NumTiles() > propagated.NumTiles();
std::vector<int64_t> output_group_dims;
for (int64_t i = 0; i < inner_base_shape.rank(); ++i) {
int64_t num_shards = propagated.tile_assignment().dim(i);
if (num_shards > 1) {
inner_base_shape.set_dimensions(
i, CeilOfRatio(base_shape.dimensions(i), num_shards));
output_group_dims.push_back(i);
if (num_shards != sharding.tile_assignment().dim(i)) {
use_original_output_sharding = false;
}
}
}
auto operand_group = hlo_sharding_util::GroupShardingOnDims(
operand.sharding(), operand_group_dims);
auto output_group = hlo_sharding_util::GroupShardingOnDims(
use_original_output_sharding ? sharding : propagated,
output_group_dims);
if (use_original_output_sharding) {
output_group = AlignGroupsWith(std::move(output_group), operand_group);
}
auto inner_state = CreatePerGroupPartitioningState(
operand.state(), operand_group.device_groups, operand.state().b);
HloInstruction* inner_operand_hlo =
b_.AddInstruction(HloInstruction::CreateUnary(
operand.hlo()->shape(), HloOpcode::kCopy, operand.hlo()));
inner_operand_hlo->set_sharding(operand_group.sharding);
auto inner_operand = PartitionedHlo(
inner_operand_hlo, inner_operand_base_shape, inner_state);
TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
recursive_shard(inner_operand, output_group.sharding,
inner_base_shape));
reshape->set_sharding(hlo_sharding_util::UngroupSharding(output_group));
return PartitionedHlo(reshape, base_shape, operand.state())
.Reshard(sharding)
.hlo();
}
return shard_reshape(operand, sharding, base_shape);
};
TF_ASSIGN_OR_RETURN(HloInstruction * partitioned,
recursive_shard(operand, sharding, hlo->shape()));
SetPartitionedHlo(hlo, [&] { return partitioned; });
return absl::OkStatus();
}
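// Iota: emit a per-shard iota and, if the iota dimension is sharded, add a
// partition-dependent offset of (partition ordinal * per-shard size).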
absl::Status SpmdPartitioningVisitor::HandleIota(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
if (sharding.IsTileMaximal()) {
return DefaultAction(hlo);
}
SetPartitionedHlo(hlo, [&] {
int64_t dimension = Cast<HloIotaInstruction>(hlo)->iota_dimension();
auto iota = b_.AddInstruction(HloInstruction::CreateIota(
MakePartitionedShape(hlo->shape(), sharding), dimension));
if (sharding.tile_assignment().dim(dimension) > 1) {
auto partition_ordinals = MakeTiledPartitionOrdinals(
sharding, MakePartitioningState().partition_id, &b_);
auto multiplier = b_.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(iota->shape().dimensions(dimension))));
auto offset = b_.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
partition_ordinals[dimension], multiplier));
if (iota->shape().element_type() != S32) {
offset = b_.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(iota->shape().element_type(), {}), offset));
}
auto broadcast = b_.AddInstruction(
HloInstruction::CreateBroadcast(iota->shape(), offset, {}));
return b_.AddInstruction(HloInstruction::CreateBinary(
iota->shape(), HloOpcode::kAdd, iota, broadcast));
}
return iota;
});
return absl::OkStatus();
}
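// Single-device ops: wrap the original op in a conditional so that only the
// assigned partition executes it; all other partitions produce zeros of the
// same shape.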
absl::Status SpmdPartitioningVisitor::HandleSingleDevice(
const HloInstruction* hlo) {
TF_RET_CHECK(hlo->sharding().HasUniqueDevice());
int64_t device = hlo->sharding().GetUniqueDevice();
const HloSharding sharding = HloSharding::AssignDevice(device);
std::vector<HloInstruction*> operands;
std::vector<const Shape*> operand_shapes;
const auto& old_operands = hlo->operands();
const auto old_operands_size = old_operands.size();
operands.reserve(old_operands_size);
operand_shapes.reserve(old_operands_size);
for (const HloInstruction* operand : old_operands) {
operands.push_back(GetPartitionedHlo(operand).Reshard(sharding).hlo());
operand_shapes.push_back(&operand->shape());
}
auto operand = b_.AddInstruction(HloInstruction::CreateTuple(operands));
auto operand_shape = ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes);
auto on_device = b_.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32_t>(device)));
auto pred = b_.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), MakePartitioningState().partition_id,
on_device, ComparisonDirection::kEq));
SpmdBuilder true_b("true_computation", visiting_hlo_);
HloComputation* true_computation;
{
auto param = true_b.AddInstruction(HloInstruction::CreateParameter(
0, operand_shape, "true_branch_param"));
std::vector<HloInstruction*> new_operands;
new_operands.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
new_operands.push_back(true_b.AddInstruction(
HloInstruction::CreateGetTupleElement(*operand_shapes[i], param, i)));
}
auto root = true_b.AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands));
true_computation = module_->AddEmbeddedComputation(true_b.Build(root));
}
SpmdBuilder false_b("false_computation", visiting_hlo_);
HloComputation* false_computation;
{
false_b.AddInstruction(HloInstruction::CreateParameter(
0, operand_shape, "false_branch_param"));
auto root = CreateZero(hlo->shape(), &false_b);
false_computation = module_->AddEmbeddedComputation(false_b.Build(root));
}
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateConditional(
hlo->shape(), pred, operand, true_computation, operand,
false_computation));
});
return absl::OkStatus();
}
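// All-reduce: cross-replica all-reduces and manual/replicated cross-partition
// all-reduces are treated like elementwise ops on the partitioned operand;
// cross-partition all-reduces in partial manual mode are validated so that
// replica groups do not cross manual subgroups. Other cases fall back to
// DefaultAction.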
absl::Status SpmdPartitioningVisitor::HandleAllReduce(HloInstruction* hlo) {
if (hlo->IsCrossReplicaAllReduce() && hlo->operand_count() == 1) {
return HandleElementwise(hlo);
}
if (hlo->channel_id()) {
TF_RET_CHECK(hlo->operand_count() == 1)
<< "SPMD partitioner supports only single-operand allreduce in manual "
"partitioning mode.";
if (hlo->sharding().IsManual() || hlo->sharding().IsReplicated()) {
return HandleElementwise(hlo);
}
TF_RET_CHECK(hlo->sharding().IsManualSubgroup())
<< "Cross-partition allreduce must be in (partial) manual partitioning "
"mode.";
auto* ar = Cast<HloAllReduceInstruction>(hlo);
TF_RET_CHECK(ar->use_global_device_ids())
<< "Cross-partition allreduce in partial manual partitioning mode must "
"use global device IDs.";
std::vector<int64_t> partition_to_group_id(
hlo->sharding().tile_assignment().num_elements());
hlo->sharding().tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t partition) {
int64_t group_id = 0;
for (int64_t i = 0; i < indices.size(); ++i) {
if (i == hlo->sharding().SubgroupManualDim()) {
continue;
}
group_id *= hlo->sharding().tile_assignment().dim(i);
group_id += indices[i];
}
partition_to_group_id[partition] = group_id;
});
for (const auto& group : ar->replica_groups()) {
int64_t first_partition = group.replica_ids(0) % num_partitions_;
for (int64_t device : group.replica_ids()) {
int64_t partition = device % num_partitions_;
if (partition_to_group_id[partition] !=
partition_to_group_id[first_partition]) {
return InvalidArgumentStrCat(
"Manual all-reduce across devices that belong to different "
"manual subgroups: ",
ar->ToString());
}
}
}
return HandleElementwise(hlo);
}
return DefaultAction(hlo);
}
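// Broadcast: reshard the operand so that dimensions introduced by the
// broadcast are replicated (and removed from the operand sharding), then clone
// the broadcast on the per-shard output shape.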
absl::Status SpmdPartitioningVisitor::HandleBroadcast(HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
auto& operand = GetPartitionedHlo(hlo->operand(0));
std::vector<int64_t> new_dims;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (!absl::c_linear_search(hlo->dimensions(), i)) {
new_dims.push_back(i);
}
}
auto desired_input_sharding = hlo_sharding_util::RemoveShapeDimensions(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(hlo->sharding(),
new_dims),
new_dims);
auto input = operand.Reshard(desired_input_sharding).hlo();
auto output_shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(
hlo->CloneWithNewOperands(output_shard_shape, {input}));
});
return absl::OkStatus();
}
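// Constant: handled directly only when the literal is not a tuple and either
// the sharding is tile-maximal or the constant partitions evenly with all
// elements equal to the first; each partition then keeps a slice of the
// literal sized to its shard.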
absl::Status SpmdPartitioningVisitor::HandleConstant(HloInstruction* hlo) {
const Literal& literal = hlo->literal();
if (literal.shape().IsTuple() ||
(!hlo->sharding().IsTileMaximal() &&
(!EvenlyPartitions(hlo->shape(), hlo->sharding()) ||
!literal.IsAllFirst()))) {
return DefaultAction(hlo);
}
SetPartitionedHlo(hlo, [&]() {
auto shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
std::vector<int64_t> start_indices(hlo->shape().rank(), 0);
auto constant = b_.AddInstruction(HloInstruction::CreateConstant(
literal.Slice(start_indices, shard_shape.dimensions())));
*constant->mutable_shape() = shard_shape;
return constant;
});
return absl::OkStatus();
}
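// Dynamic-slice: only supported when every sharded dimension is fully kept
// (the slice size equals the operand dimension). Indices of fully kept
// dimensions are replaced by zeros; the remaining indices are replicated.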
absl::Status SpmdPartitioningVisitor::HandleDynamicSlice(HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (hlo->sharding().tile_assignment().dim(i) != 1 &&
hlo->dynamic_slice_sizes()[i] !=
hlo->operand(0)->shape().dimensions(i)) {
return DefaultAction(hlo);
}
}
std::vector<HloInstruction*> new_indices(hlo->shape().rank());
auto new_input =
GetPartitionedHlo(hlo->operand(0)).Reshard(hlo->sharding()).hlo();
for (int64_t i = 0; i < new_indices.size(); ++i) {
if (hlo->dynamic_slice_sizes()[i] ==
hlo->operand(0)->shape().dimensions(i)) {
new_indices[i] = CreateZero(hlo->operand(i + 1)->shape(), &b_);
continue;
}
new_indices[i] = GetPartitionedHlo(hlo->operand(i + 1))
.Reshard(HloSharding::Replicate())
.hlo();
}
SetPartitionedHlo(hlo, [&]() {
auto partitioned_shape =
MakePartitionedShape(hlo->shape(), hlo->sharding());
return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
partitioned_shape, new_input, new_indices,
partitioned_shape.dimensions()));
});
return absl::OkStatus();
}
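// Dynamic-update-slice: classifies each dimension as sliced/unsliced and
// sharded/unsharded, then picks one of three strategies: replicate the sliced
// dimensions, adjust constant offsets per partition, or update directly when
// no sliced dimension is sharded.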
absl::Status SpmdPartitioningVisitor::HandleDynamicUpdateSlice(
HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
std::vector<int64_t> partitioned_slice_dims;
std::vector<int64_t> slice_dims;
std::vector<int64_t> partitioned_non_slice_dims;
std::vector<int64_t> partitioned_slice_offsets;
bool any_non_constant_sliced_dim = false;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
if (hlo->operand(1)->shape().dimensions(i) != hlo->shape().dimensions(i)) {
slice_dims.push_back(i);
int64_t slice_size = hlo->operand(1)->shape().dimensions(i);
if (hlo->sharding().tile_assignment().dim(i) != 1) {
if (!hlo->operand(i + 2)->IsConstant() && slice_size != 1) {
any_non_constant_sliced_dim = true;
continue;
}
partitioned_slice_dims.push_back(i);
if (slice_size == 1) {
partitioned_slice_offsets.push_back(-1);
} else {
partitioned_slice_offsets.push_back(
hlo->operand(i + 2)->literal().Get<int>({}));
}
}
} else if (hlo->sharding().tile_assignment().dim(i) != 1) {
partitioned_non_slice_dims.push_back(i);
}
}
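  // Fallback: partially replicate all dimensions except the sharded non-sliced
  // ones, perform the update with replicated indices, and reshard the result
  // back to the original sharding.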
auto handle_with_replicate_slice_dims = [&]() {
HloSharding replicated_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
hlo->operand(0)->sharding(), partitioned_non_slice_dims);
auto base = GetPartitionedHlo(hlo->operand(0)).Reshard(replicated_sharding);
auto operand =
GetPartitionedHlo(hlo->operand(1)).Reshard(replicated_sharding);
std::vector<HloInstruction*> new_indices(hlo->shape().rank());
for (int64_t i = 0; i < new_indices.size(); ++i) {
new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
.Reshard(HloSharding::Replicate())
.hlo();
}
auto dus = b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
base.hlo()->shape(), base.hlo(), operand.hlo(), new_indices));
dus->set_sharding(replicated_sharding);
SetPartitionedHlo(hlo, PartitionedHlo(dus, base.base_shape(), base.state())
.Reshard(hlo->sharding()));
};
if (any_non_constant_sliced_dim) {
if (partitioned_non_slice_dims.empty()) {
return DefaultAction(hlo);
}
handle_with_replicate_slice_dims();
return absl::OkStatus();
}
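  // All sharded slice dimensions have constant offsets (or slice size 1):
  // shift the indices into per-partition coordinates, clamp updates that fall
  // outside the local shard, and select between the updated and original data
  // on each partition.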
if (!partitioned_slice_dims.empty()) {
auto add_hlo = [&](std::unique_ptr<HloInstruction> to_add) {
return b_.AddInstruction(std::move(to_add));
};
std::vector<HloInstruction*> new_indices(hlo->shape().rank());
for (int64_t i = 0; i < new_indices.size(); ++i) {
if (hlo->operand(1)->shape().dimensions(i) ==
hlo->shape().dimensions(i)) {
new_indices[i] = CreateZero(hlo->operand(i + 2)->shape(), &b_);
continue;
}
new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
.Reshard(HloSharding::Replicate())
.hlo();
}
const auto& dus_sharding = hlo->sharding();
const auto& partitioned_input =
GetPartitionedHlo(hlo->operand(0)).Reshard(dus_sharding).hlo();
auto update_sharding = HloSharding::Replicate();
if (!partitioned_non_slice_dims.empty()) {
update_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(dus_sharding,
slice_dims);
}
HloInstruction* replicate_update =
GetPartitionedHlo(hlo->operand(1)).Reshard(update_sharding).hlo();
const auto& update_shape = replicate_update->shape();
const auto& partitioned_shape = partitioned_input->shape();
auto partition_ordinals = MakeTiledPartitionOrdinals(
hlo->sharding(), MakePartitioningState().partition_id, &b_);
HloInstruction* all_dims_within_partition = add_hlo(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
for (int i = 0; i < partitioned_slice_dims.size(); ++i) {
int dim = partitioned_slice_dims[i];
const int64_t per_partition_size = partitioned_shape.dimensions(dim);
if ((partitioned_slice_offsets[i] != -1) &&
(partitioned_slice_offsets[i] / per_partition_size) !=
((partitioned_slice_offsets[i] + update_shape.dimensions(dim) -
1) /
per_partition_size)) {
handle_with_replicate_slice_dims();
return absl::OkStatus();
}
const Shape& compare_shape =
ShapeUtil::ChangeElementType(partition_id_->shape(), PRED);
auto per_partition_size_hlo = add_hlo(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int>(per_partition_size)));
const Shape& offset_shape = per_partition_size_hlo->shape();
auto partition_offset = add_hlo(HloInstruction::CreateBinary(
offset_shape, HloOpcode::kMultiply, partition_ordinals[dim],
per_partition_size_hlo));
auto offset_ge = add_hlo(HloInstruction::CreateCompare(
compare_shape, new_indices[dim], partition_offset,
ComparisonDirection::kGe));
auto offset_lt = add_hlo(HloInstruction::CreateCompare(
compare_shape, new_indices[dim],
add_hlo(HloInstruction::CreateBinary(
offset_shape, HloOpcode::kMultiply,
add_hlo(HloInstruction::CreateBinary(
offset_shape, HloOpcode::kAdd, partition_ordinals[dim],
add_hlo(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int>(1))))),
per_partition_size_hlo)),
ComparisonDirection::kLt));
auto update_within_partition = add_hlo(HloInstruction::CreateBinary(
compare_shape, HloOpcode::kAnd, offset_ge, offset_lt));
all_dims_within_partition = add_hlo(HloInstruction::CreateBinary(
compare_shape, HloOpcode::kAnd, all_dims_within_partition,
update_within_partition));
new_indices[dim] = add_hlo(HloInstruction::CreateTernary(
new_indices[dim]->shape(), HloOpcode::kSelect,
update_within_partition,
add_hlo(HloInstruction::CreateBinary(
new_indices[dim]->shape(), HloOpcode::kSubtract, new_indices[dim],
partition_offset)),
add_hlo(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)))));
}
auto dus = add_hlo(HloInstruction::CreateDynamicUpdateSlice(
partitioned_shape, partitioned_input, replicate_update, new_indices));
SetPartitionedHlo(hlo, [&]() {
return add_hlo(HloInstruction::CreateTernary(
dus->shape(), HloOpcode::kSelect,
add_hlo(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(dus->shape(), PRED),
all_dims_within_partition, {})),
dus, partitioned_input));
});
return absl::OkStatus();
}
std::vector<HloInstruction*> new_indices(hlo->shape().rank());
auto new_input =
GetPartitionedHlo(hlo->operand(0)).Reshard(hlo->sharding()).hlo();
auto new_update =
GetPartitionedHlo(hlo->operand(1)).Reshard(hlo->sharding()).hlo();
for (int64_t i = 0; i < new_indices.size(); ++i) {
if (hlo->operand(1)->shape().dimensions(i) == hlo->shape().dimensions(i)) {
new_indices[i] = CreateZero(hlo->operand(i + 2)->shape(), &b_);
continue;
}
new_indices[i] = GetPartitionedHlo(hlo->operand(i + 2))
.Reshard(HloSharding::Replicate())
.hlo();
}
SetPartitionedHlo(hlo, [&]() {
auto partitioned_shape =
MakePartitionedShape(hlo->shape(), hlo->sharding());
return b_.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
partitioned_shape, new_input, new_update, new_indices));
});
return absl::OkStatus();
}
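// Get-tuple-element: extract the element from the partitioned tuple, keep its
// sub-sharding, and reshard to the requested output sharding.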
absl::Status SpmdPartitioningVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
if (hlo->sharding().IsManual()) {
return DefaultAction(hlo);
}
const auto& tuple = GetPartitionedHlo(hlo->operand(0));
auto gte = b_.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(tuple.hlo()->shape(), hlo->tuple_index()),
tuple.hlo(), hlo->tuple_index()));
const auto source_sharding =
tuple.sharding().GetSubSharding(tuple.base_shape(), {hlo->tuple_index()});
gte->set_sharding(source_sharding);
PartitionedHlo source_partitioned_gte(
gte, tuple.base_shape().tuple_shapes(hlo->tuple_index()),
MakePartitioningState());
source_partitioned_gte = source_partitioned_gte.Reshard(hlo->sharding());
SetPartitionedHlo(hlo, source_partitioned_gte);
return absl::OkStatus();
}
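// Infeed: evenly partitioned shapes infeed the shard shape directly; otherwise
// a conditional selected by partition id runs one infeed branch per distinct
// per-partition shape and pads the branch result up to the shard shape.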
absl::Status SpmdPartitioningVisitor::HandleInfeed(HloInstruction* hlo) {
const Shape& shape = ShapeUtil::GetTupleElementShape(hlo->shape(), 0);
auto token = GetPartitionedHlo(hlo->operand(0)).hlo();
if (ShapeUtil::GetLeafCount(shape) == 0) {
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(
HloInstruction::CreateInfeed(shape, token, hlo->infeed_config()));
});
return absl::OkStatus();
}
auto sharding = hlo->sharding().GetSubSharding(hlo->shape(), {0});
auto shard_shape = MakePartitionedShape(shape, sharding);
if (EvenlyPartitions(shape, sharding)) {
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateInfeed(
shard_shape, token, hlo->infeed_config()));
});
return absl::OkStatus();
}
if (hlo->sharding().HasUniqueDevice()) {
return HandleSingleDevice(hlo);
}
std::vector<Shape> per_branch_partitioned_shapes;
std::vector<int32_t> conditional_branch_indices(num_partitions_);
for (int64_t i = 0; i < num_partitions_; ++i) {
auto partitioned_shape =
MakeNonPaddedShapeForGivenPartition(shape, sharding, i);
int64_t matching_existing_index = 0;
for (; matching_existing_index < per_branch_partitioned_shapes.size();
++matching_existing_index) {
if (ShapeUtil::Compatible(
partitioned_shape,
per_branch_partitioned_shapes[matching_existing_index])) {
break;
}
}
if (matching_existing_index < per_branch_partitioned_shapes.size()) {
conditional_branch_indices[i] = matching_existing_index;
} else {
conditional_branch_indices[i] = per_branch_partitioned_shapes.size();
per_branch_partitioned_shapes.push_back(std::move(partitioned_shape));
}
}
HloInstruction* branch_index;
auto state = MakePartitioningState();
if (per_branch_partitioned_shapes.size() == num_partitions_) {
branch_index = state.partition_id;
if (branch_index->shape().element_type() != S32) {
branch_index = b_.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(branch_index->shape(), S32),
branch_index));
}
} else {
auto branch_index_table = b_.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(conditional_branch_indices)));
branch_index = b_.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}), branch_index_table,
{state.partition_id}, {1}));
branch_index = b_.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}), branch_index));
}
std::vector<HloComputation*> branches(per_branch_partitioned_shapes.size());
for (int64_t i = 0; i < branches.size(); ++i) {
SpmdBuilder branch_b(absl::StrCat("infeed_branch_", i), visiting_hlo_);
auto param = branch_b.AddInstruction(HloInstruction::CreateParameter(
0, token->shape(), "infeed_token_param"));
auto infeed = branch_b.AddInstruction(HloInstruction::CreateInfeed(
per_branch_partitioned_shapes[i], param, hlo->infeed_config()));
if (!ShapeUtil::Compatible(per_branch_partitioned_shapes[i], shard_shape)) {
std::function<HloInstruction*(const ShapeIndex&, HloInstruction*)>
pad_infeed = [&](const ShapeIndex& index,
HloInstruction* infeed_element) -> HloInstruction* {
if (index == ShapeIndex({1})) {
return infeed_element;
}
const Shape& element_shape =
ShapeUtil::GetSubshape(infeed->shape(), index);
if (element_shape.IsTuple() && element_shape.tuple_shapes_size() > 0) {
std::vector<HloInstruction*> padded_elements(
element_shape.tuple_shapes_size());
for (int64_t i = 0; i < padded_elements.size(); ++i) {
auto sub_index = index;
sub_index.push_back(i);
padded_elements[i] = pad_infeed(
sub_index,
branch_b.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(element_shape, {i}), infeed_element,
i)));
}
return branch_b.AddInstruction(
HloInstruction::CreateTuple(padded_elements));
}
const Shape& pad_shape = ShapeUtil::GetSubshape(
shard_shape, ShapeIndexView(index).subspan(1));
if (ShapeUtil::Compatible(element_shape, pad_shape)) {
return infeed_element;
}
if (element_shape.IsArray()) {
CHECK(pad_shape.IsArray());
return PadToShape(infeed_element, pad_shape, &branch_b);
}
CHECK(element_shape.IsTuple());
CHECK(element_shape.tuple_shapes().empty());
return CreateZero(pad_shape, &branch_b);
};
pad_infeed({}, infeed);
}
branches[i] = module_->AddEmbeddedComputation(branch_b.Build());
}
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateConditional(
ShapeUtil::MakeTupleShape({shard_shape, token->shape()}), branch_index,
branches, std::vector<HloInstruction*>(branches.size(), token)));
});
return absl::OkStatus();
}
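// Pad: reshard the operand for padding, pad per shard, and dynamic-slice the
// result if the reshard requires it.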
absl::Status SpmdPartitioningVisitor::HandlePad(HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
auto lhs = GetPartitionedHlo(hlo->operand(0));
auto replicated_rhs = GetPartitionedHlo(hlo->operand(1))
.Reshard(HloSharding::Replicate())
.hlo();
auto reshard_operand = ReshardDataForPad(
replicated_rhs, hlo->padding_config(), lhs, hlo->sharding(), &b_);
if (!reshard_operand.has_value()) {
return DefaultAction(hlo);
}
auto* sharded_pad =
PadDataFromWindowReshard(*reshard_operand, replicated_rhs, &b_);
SetPartitionedHlo(hlo, [&]() {
if (!reshard_operand->dynamic_slice_index_on_output) {
return sharded_pad;
}
auto shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, sharded_pad,
*reshard_operand->dynamic_slice_index_on_output,
shard_shape.dimensions()));
});
return absl::OkStatus();
}
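// Parameter: replace the parameter with one of the per-shard shape, keeping
// the replicated-at-leaf-buffers annotation.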
absl::Status SpmdPartitioningVisitor::HandleParameter(HloInstruction* hlo) {
SetPartitionedHlo(hlo, [&]() {
auto shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
auto new_param = b_.AddInstruction(HloInstruction::CreateParameter(
hlo->parameter_number(), shard_shape, "param"));
if (hlo->parameter_replicated_at_leaf_buffers()) {
new_param->set_parameter_replicated_at_leaf_buffers(
*hlo->parameter_replicated_at_leaf_buffers());
}
return new_param;
});
return absl::OkStatus();
}
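// Reduce: run a local reduce on the per-shard inputs (padded with the init
// value along reduced dimensions), then combine partial results across
// partitions when a reduced dimension is sharded.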
absl::Status SpmdPartitioningVisitor::HandleReduce(HloInstruction* hlo) {
int64_t input_count = 1;
if (hlo->shape().IsTuple()) {
input_count = hlo->shape().tuple_shapes_size();
CHECK_GT(input_count, 0);
}
if (hlo->sharding().HasUniqueDevice()) {
std::vector<HloInstruction*> new_operands(input_count * 2, nullptr);
for (auto i = 0; i != input_count; ++i) {
HloSharding subsharding =
hlo->sharding().IsTuple()
? hlo->sharding().GetSubSharding(hlo->shape(), {i})
: hlo->sharding();
CHECK(!subsharding.IsTuple() && subsharding.HasUniqueDevice());
new_operands[i] =
GetPartitionedHlo(hlo->operand(i)).Reshard(subsharding).hlo();
new_operands[input_count + i] =
GetPartitionedHlo(hlo->operand(input_count + i))
.Reshard(subsharding)
.hlo();
}
auto clone = b_.AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands));
clone->copy_sharding(hlo);
SetPartitionedHlo(
hlo, PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
.Reshard(hlo->sharding()));
return absl::OkStatus();
}
std::vector<PartitionedHlo> inputs;
std::vector<HloInstruction*> inits;
std::vector<int64_t> preserved_dims;
for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
if (!absl::c_linear_search(hlo->dimensions(), i)) {
preserved_dims.push_back(i);
}
}
for (int64_t operand_id = 0; operand_id < input_count; ++operand_id) {
inits.push_back(GetPartitionedHlo(hlo->operand(operand_id + input_count))
.Reshard(HloSharding::Replicate())
.hlo());
inputs.push_back(GetPartitionedHlo(hlo->operand(operand_id)));
if (operand_id > 0) {
inputs.back() = inputs.back().Reshard(inputs[0].sharding());
}
if (!inputs[0].sharding().IsTileMaximal()) {
inputs.back() =
inputs.back().PadWithValue(inits[operand_id], {},
preserved_dims);
}
}
std::vector<const Shape*> new_operand_shapes(input_count * 2);
for (int64_t i = 0; i < input_count; ++i) {
new_operand_shapes[i] = &inputs[i].hlo()->shape();
new_operand_shapes[i + input_count] = &inits[i]->shape();
}
TF_ASSIGN_OR_RETURN(
auto reduce_shape,
ShapeInference::InferReduceShape(new_operand_shapes, hlo->dimensions(),
hlo->to_apply()->ComputeProgramShape()));
std::vector<HloInstruction*> input_hlos(input_count);
for (int64_t i = 0; i < input_count; ++i) {
input_hlos[i] = inputs[i].hlo();
}
auto local_reduce = b_.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, input_hlos, inits, hlo->dimensions(), hlo->to_apply()));
SetPartitionedHlo(hlo, [&]() {
HloInstruction* reduce = local_reduce;
const bool reduce_sharded_dimension =
!inputs[0].sharding().IsTileMaximal() &&
absl::c_any_of(hlo->dimensions(), [&](int64_t i) {
return inputs[0].sharding().tile_assignment().dim(i) > 1;
});
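    // A reduced dimension is sharded: combine the partial results, using an
    // all-reduce along the sharded reduce dimensions for array outputs, or an
    // all-gather of the partial results followed by a second reduce for
    // variadic (tuple-shaped) reduces.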
if (reduce_sharded_dimension) {
if (inputs[0].sharding().ReplicateOnLastTileDim()) {
preserved_dims.push_back(inputs[0].base_shape().rank());
}
if (local_reduce->shape().IsArray()) {
reduce = partitioner_->AllReduceAlongShardingDims(
&b_, local_reduce, inputs[0].sharding(), next_channel_id_,
hlo->dimensions(), collective_ops_creator_, hlo->to_apply());
} else {
auto grouped = hlo_sharding_util::GroupShardingOnDims(
inputs[0].sharding(), preserved_dims);
auto grouped_state = CreatePerGroupPartitioningState(
inputs[0].state(), grouped.device_groups, &b_);
std::vector<HloInstruction*> all_gathered_partial_results(input_count);
for (int64_t i = 0; i < input_count; ++i) {
auto gte = b_.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(reduce_shape, i), local_reduce,
i));
auto expanded_shape = input_hlos[i]->shape();
auto all_gather_shape = input_hlos[i]->shape();
for (int64_t dim : hlo->dimensions()) {
expanded_shape.set_dimensions(dim, 1);
all_gather_shape.set_dimensions(
dim, inputs[0].sharding().tile_assignment().dim(dim));
}
auto reshape = b_.AddInstruction(
HloInstruction::CreateReshape(expanded_shape, gte));
reshape->set_sharding(grouped.sharding);
all_gathered_partial_results[i] =
PartitionedHlo(reshape, all_gather_shape, grouped_state)
.Replicate()
.hlo();
}
reduce = b_.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, all_gathered_partial_results, inits,
hlo->dimensions(), hlo->to_apply()));
}
}
auto sharding = hlo_sharding_util::RemoveShapeDimensions(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
inputs[0].sharding(), hlo->dimensions()),
hlo->dimensions());
if (local_reduce->shape().IsArray()) {
reduce->set_sharding(sharding);
} else {
reduce->set_sharding(HloSharding::Tuple(
reduce->shape(), std::vector<HloSharding>(input_count, sharding)));
}
return PartitionedHlo(reduce, hlo->shape(), MakePartitioningState())
.Reshard(hlo->sharding())
.hlo();
});
return absl::OkStatus();
}
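// Reverse: reshard the operand with the reversed sharding and use a halo
// exchange to move the uneven-partition padding to the left of each shard
// before applying the reverse per shard.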
absl::Status SpmdPartitioningVisitor::HandleReverse(HloInstruction* hlo) {
auto reverse = Cast<HloReverseInstruction>(hlo);
if (reverse->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
auto operand = GetPartitionedHlo(reverse->operand(0))
.Reshard(hlo_sharding_util::ReverseSharding(
reverse->sharding(), reverse->dimensions()));
auto left_padded_operand =
HaloExchangeToPadOnLeft(operand, reverse->dimensions());
if (!left_padded_operand) {
return DefaultAction(hlo);
}
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(hlo->CloneWithNewOperands(
left_padded_operand->shape(), {left_padded_operand}));
});
return absl::OkStatus();
}
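// While: propagate the sharding into the condition and body parameters,
// partition both computations (the condition root is replicated on its data
// dimensions), and emit the while on the per-shard shape.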
absl::Status SpmdPartitioningVisitor::HandleWhile(HloInstruction* hlo) {
const HloSharding& sharding = hlo->sharding();
hlo->while_condition()->parameter_instruction(0)->set_sharding(sharding);
hlo->while_body()->parameter_instruction(0)->set_sharding(sharding);
HloInstruction* cond_root = hlo->while_condition()->root_instruction();
const HloSharding cond_root_sharding =
hlo_sharding_util::ReplicateAllDataDims(cond_root->sharding());
cond_root->set_sharding(cond_root_sharding);
TF_RETURN_IF_ERROR(
partitioner_
->PartitionComputation(hlo->while_condition(), cond_root_sharding,
next_channel_id_, logger_, call_graph_)
.status());
TF_RETURN_IF_ERROR(partitioner_
->PartitionComputation(hlo->while_body(), sharding,
next_channel_id_, logger_,
call_graph_)
.status());
HloInstruction* whileOp = b_.AddInstruction(HloInstruction::CreateWhile(
MakePartitionedShape(hlo->shape(), sharding), hlo->while_condition(),
hlo->while_body(),
GetPartitionedHlo(hlo->operand(0)).Reshard(sharding).hlo()));
hlo->SetupDerivedInstruction(whileOp);
SetPartitionedHlo(hlo, [&] { return whileOp; });
return absl::OkStatus();
}
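// Conditional: partition each branch with the conditional's sharding, pass the
// partitioned branch operands through with their existing sharding, and
// replicate the predicate (or its non-manual subgroups) before emitting the
// conditional.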
absl::Status SpmdPartitioningVisitor::HandleConditional(HloInstruction* hlo) {
std::vector<HloInstruction*> branch_args;
for (int64_t i = 0; i < hlo->branch_count(); ++i) {
HloComputation* computation = hlo->branch_computation(i);
computation->parameter_instruction(0)->set_sharding(
hlo->operand(i + 1)->sharding());
branch_args.push_back(GetPartitionedHlo(hlo->operand(i + 1)).hlo());
}
for (int64_t i = 0; i < hlo->branch_count(); ++i) {
HloComputation* computation = hlo->branch_computation(i);
TF_RETURN_IF_ERROR(partitioner_
->PartitionComputation(computation, hlo->sharding(),
next_channel_id_, logger_,
call_graph_)
.status());
}
SetPartitionedHlo(hlo, [&] {
HloInstruction* cond = GetPartitionedHlo(hlo->operand(0)).hlo();
if (!hlo->operand(0)->sharding().IsManual()) {
if (hlo->operand(0)->sharding().IsManualSubgroup()) {
auto grouped_sharding = hlo_sharding_util::GetManualSubgroupSharding(
hlo->operand(0)->sharding());
grouped_sharding.sharding = HloSharding::Replicate();
cond =
GetPartitionedHlo(hlo->operand(0))
.Reshard(hlo_sharding_util::UngroupSharding(grouped_sharding))
.hlo();
} else {
cond = GetPartitionedHlo(hlo->operand(0))
.Reshard(HloSharding::Replicate())
.hlo();
}
}
return b_.AddInstruction(HloInstruction::CreateConditional(
MakePartitionedShape(hlo->shape(), hlo->sharding()), cond,
hlo->called_computations(), branch_args));
});
return absl::OkStatus();
}
absl::Status SpmdPartitioningVisitor::HandleOptimizationBarrier(
HloInstruction* hlo) {
return HandleElementwise(hlo);
}
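// Outfeed: manual shardings are outfed as-is; evenly partitioned data is
// outfed per shard; otherwise a conditional with one outfeed branch per
// distinct per-partition shape slices off the padding before outfeeding.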
absl::Status SpmdPartitioningVisitor::HandleOutfeed(HloInstruction* hlo) {
if (hlo->sharding().HasUniqueDevice()) {
return HandleSingleDevice(hlo);
}
if (hlo->sharding().IsManual()) {
auto clone_from_original = [&](const HloSharding& shared_sharding) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
new_operands.push_back(
GetPartitionedHlo(hlo->operand(i)).Reshard(shared_sharding).hlo());
}
auto clone = b_.AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands));
clone->set_sharding(shared_sharding);
return clone;
};
SetPartitionedHlo(hlo,
[&] { return clone_from_original(hlo->sharding()); });
return absl::OkStatus();
}
HloSharding sharding = hlo->sharding();
const Shape& shape = hlo->operand(0)->shape();
const int64_t required_leaves = HloSharding::RequiredLeaves(shape);
if (sharding.IsTuple() &&
sharding.tuple_elements().size() == required_leaves + 1) {
if (shape.IsTuple()) {
sharding = HloSharding::Tuple(
shape,
absl::MakeSpan(sharding.tuple_elements().data(), required_leaves));
} else {
sharding = sharding.tuple_elements().front();
}
}
auto partitioned_operand =
GetPartitionedHlo(hlo->operand(0)).Reshard(sharding);
const auto& shard_shape = partitioned_operand.hlo()->shape();
const auto& operand = partitioned_operand.hlo();
auto token = GetPartitionedHlo(hlo->operand(1)).hlo();
if (EvenlyPartitions(shape, sharding)) {
Shape outfeed_shape = operand->shape();
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(hlo->outfeed_shape(),
&outfeed_shape));
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateOutfeed(
outfeed_shape, operand, token, hlo->outfeed_config()));
});
return absl::OkStatus();
}
std::vector<Shape> per_branch_partitioned_shapes;
std::vector<int32_t> conditional_branch_indices(num_partitions_);
for (int64_t i = 0; i < num_partitions_; ++i) {
auto partitioned_shape =
MakeNonPaddedShapeForGivenPartition(shape, sharding, i);
int64_t matching_existing_index = 0;
for (; matching_existing_index < per_branch_partitioned_shapes.size();
++matching_existing_index) {
if (ShapeUtil::Compatible(
partitioned_shape,
per_branch_partitioned_shapes[matching_existing_index])) {
break;
}
}
if (matching_existing_index < per_branch_partitioned_shapes.size()) {
conditional_branch_indices[i] = matching_existing_index;
} else {
conditional_branch_indices[i] = per_branch_partitioned_shapes.size();
per_branch_partitioned_shapes.push_back(std::move(partitioned_shape));
}
}
HloInstruction* branch_index;
auto state = MakePartitioningState();
if (per_branch_partitioned_shapes.size() == num_partitions_) {
branch_index = state.partition_id;
if (branch_index->shape().element_type() != S32) {
branch_index = b_.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(branch_index->shape(), S32),
branch_index));
}
} else {
auto branch_index_table = b_.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(conditional_branch_indices)));
branch_index = b_.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}), branch_index_table, {partition_id_},
{1}));
branch_index = b_.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}), branch_index));
}
std::vector<HloComputation*> branches(per_branch_partitioned_shapes.size());
for (int64_t i = 0; i < branches.size(); ++i) {
SpmdBuilder branch_b(absl::StrCat("outfeed_branch_", i), visiting_hlo_);
auto param = branch_b.AddInstruction(HloInstruction::CreateParameter(
0,
ShapeUtil::MakeTupleShape({operand->shape(), token->shape()}),
"outfeed_token_param"));
auto outfeed_data = branch_b.AddInstruction(
HloInstruction::CreateGetTupleElement(operand->shape(), param, 0));
auto outfeed_token = branch_b.AddInstruction(
HloInstruction::CreateGetTupleElement(token->shape(), param, 1));
if (!ShapeUtil::Compatible(per_branch_partitioned_shapes[i], shard_shape)) {
std::function<HloInstruction*(const ShapeIndex&, HloInstruction*)>
slice_outfeed =
[&](const ShapeIndex& index,
HloInstruction* outfeed_operand) -> HloInstruction* {
const Shape& element_shape =
ShapeUtil::GetSubshape(outfeed_data->shape(), index);
if (element_shape.IsTuple() && element_shape.tuple_shapes_size() > 0) {
std::vector<HloInstruction*> slice_elements(
element_shape.tuple_shapes_size());
for (int64_t i = 0; i < slice_elements.size(); ++i) {
auto sub_index = index;
sub_index.push_back(i);
slice_elements[i] = slice_outfeed(
sub_index,
branch_b.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(element_shape, {i}), outfeed_operand,
i)));
}
return branch_b.AddInstruction(
HloInstruction::CreateTuple(slice_elements));
}
const Shape& slice_shape = ShapeUtil::GetSubshape(
per_branch_partitioned_shapes[i], ShapeIndexView(index));
if (ShapeUtil::Compatible(element_shape, slice_shape)) {
return outfeed_operand;
}
if (element_shape.IsArray()) {
CHECK(slice_shape.IsArray());
std::vector<int64_t> start_indices(slice_shape.rank(), 0);
std::vector<int64_t> slice_strides(slice_shape.rank(), 1);
return branch_b.AddInstruction(HloInstruction::CreateSlice(
slice_shape, outfeed_operand, start_indices,
slice_shape.dimensions(), slice_strides));
}
CHECK(element_shape.IsTuple());
CHECK(element_shape.tuple_shapes().empty());
return outfeed_operand;
};
outfeed_data = slice_outfeed({}, outfeed_data);
}
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
hlo->outfeed_shape(), &per_branch_partitioned_shapes[i]));
branch_b.AddInstruction(HloInstruction::CreateOutfeed(
per_branch_partitioned_shapes[i], outfeed_data, outfeed_token,
hlo->outfeed_config()));
branches[i] = module_->AddEmbeddedComputation(branch_b.Build());
}
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateConditional(
token->shape(), branch_index, branches,
std::vector<HloInstruction*>(
branches.size(),
b_.AddInstruction(HloInstruction::CreateTuple({operand, token})))));
});
return absl::OkStatus();
}
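// Rng: manual shardings clone directly; replicated rngs are generated on
// device 0 and broadcast; tiled rngs generate a per-shard rng from replicated
// operands; partially replicated rngs are generated once per replication group
// and then broadcast within the group.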
absl::Status SpmdPartitioningVisitor::HandleRng(HloInstruction* hlo) {
if (hlo->sharding().HasUniqueDevice()) {
return HandleSingleDevice(hlo);
}
auto clone_from_original = [&](const HloSharding& shared_sharding) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
new_operands.push_back(
GetPartitionedHlo(hlo->operand(i)).Reshard(shared_sharding).hlo());
}
auto clone = b_.AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands));
clone->set_sharding(shared_sharding);
return clone;
};
if (hlo->sharding().IsManual()) {
SetPartitionedHlo(hlo,
[&] { return clone_from_original(hlo->sharding()); });
return absl::OkStatus();
}
if (hlo->sharding().IsReplicated()) {
SetPartitionedHlo(hlo, [&] {
auto clone = clone_from_original(HloSharding::AssignDevice(0));
return PartitionedHlo(clone, hlo->shape(), MakePartitioningState())
.Reshard(HloSharding::Replicate())
.hlo();
});
return absl::OkStatus();
}
TF_RET_CHECK(!hlo->sharding().IsTileMaximal());
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
new_operands.push_back(GetPartitionedHlo(hlo->operand(i))
.Reshard(HloSharding::Replicate())
.hlo());
}
if (!hlo->sharding().ReplicateOnLastTileDim()) {
SetPartitionedHlo(hlo, [&] {
return b_.AddInstruction(HloInstruction::CreateRng(
MakePartitionedShape(hlo->shape(), hlo->sharding()),
hlo->random_distribution(), new_operands));
});
} else {
std::vector<int64_t> group_dims(
hlo->sharding().tile_assignment().num_dimensions() - 1);
std::iota(group_dims.begin(), group_dims.end(), 0);
auto sharding_grouped =
hlo_sharding_util::GroupShardingOnDims(hlo->sharding(), group_dims);
auto per_group_state = CreatePerGroupPartitioningState(
MakePartitioningState(), sharding_grouped.device_groups, &b_);
auto rng = b_.AddInstruction(HloInstruction::CreateRng(
MakePartitionedShape(hlo->shape(), hlo->sharding()),
hlo->random_distribution(), new_operands));
rng->set_sharding(HloSharding::AssignDevice(0));
SetPartitionedHlo(hlo, [&]() {
return PartitionedHlo(rng, rng->shape(), per_group_state)
.Replicate()
.hlo();
});
}
return absl::OkStatus();
}
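// Reduce-window: reshard each input as a windowed input (with replicated init
// values), run the reduce-window on the shards, and dynamic-slice the result
// if the windowed reshard requires it.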
absl::Status SpmdPartitioningVisitor::HandleReduceWindow(HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
absl::Span<HloInstruction* const> input_arrays = reduce_window->inputs();
absl::Span<HloInstruction* const> init_values = reduce_window->init_values();
absl::InlinedVector<PartitionedHlo::WindowedInputShardReturnValue, 2>
sharded_results;
absl::InlinedVector<const Shape*, 2> sharded_input_shapes,
replicated_init_shapes;
absl::InlinedVector<HloInstruction*, 2> sharded_inputs, replicated_inits;
int64_t input_idx = 0;
for (const HloInstruction* input_array : input_arrays) {
PartitionedHlo& operand = GetPartitionedHlo(input_array);
PartitionedHlo replicated_init = GetPartitionedHlo(init_values[input_idx])
.Reshard(HloSharding::Replicate());
const HloSharding& sharding =
hlo->sharding().IsTuple() ? hlo->sharding().tuple_elements()[input_idx]
: hlo->sharding();
auto resharded_operand_and_window = operand.ReshardAsWindowedInput(
hlo->window(), sharding, replicated_init.hlo());
if (!resharded_operand_and_window.has_value()) {
return DefaultAction(hlo);
}
sharded_results.push_back(resharded_operand_and_window.value());
sharded_inputs.push_back(resharded_operand_and_window->sharded_input);
sharded_input_shapes.push_back(&sharded_inputs.back()->shape());
replicated_inits.push_back(replicated_init.hlo());
replicated_init_shapes.push_back(&replicated_inits.back()->shape());
input_idx++;
}
TF_ASSIGN_OR_RETURN(Shape sharded_rw_shape,
ShapeInference::InferReduceWindowShape(
sharded_input_shapes, replicated_init_shapes,
sharded_results[0].shard_window,
hlo->to_apply()->ComputeProgramShape()));
Shape shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
if (shard_shape.has_layout()) {
*sharded_rw_shape.mutable_layout() = shard_shape.layout();
}
SetPartitionedHlo(hlo, [&]() {
HloInstruction* sharded_rw =
b_.AddInstruction(HloInstruction::CreateReduceWindow(
sharded_rw_shape, sharded_inputs, replicated_inits,
sharded_results[0].shard_window, hlo->to_apply()));
if (!sharded_results[0].dynamic_slice_index_on_output.has_value()) {
CHECK(ShapeUtil::Compatible(shard_shape, sharded_rw->shape()))
<< shard_shape << " vs " << sharded_rw->shape() << "\n";
return sharded_rw;
}
return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, sharded_rw,
*sharded_results[0].dynamic_slice_index_on_output,
shard_shape.dimensions()));
});
return absl::OkStatus();
}
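// Partitions select-and-scatter for f32/bf16 inputs whose select computation
// is a simple min/max-style compare of its two parameters: halos are
// exchanged for both the data and the source operands, select-and-scatter
// runs per shard, and the valid region is sliced out. Other cases fall back
// to DefaultAction.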
absl::Status SpmdPartitioningVisitor::HandleSelectAndScatter(
HloInstruction* hlo) {
if (hlo->sharding().IsTileMaximal()) {
return DefaultAction(hlo);
}
auto operand = GetPartitionedHlo(hlo->operand(0));
auto source = GetPartitionedHlo(hlo->mutable_operand(1));
if (hlo->sharding() != operand.sharding()) {
operand = operand.Reshard(hlo->sharding());
}
if (hlo->sharding() != source.sharding()) {
source = source.Reshard(hlo->sharding());
}
if (hlo->shape().element_type() != F32 &&
hlo->shape().element_type() != BF16) {
return DefaultAction(hlo);
}
auto select = hlo->called_computations()[0];
auto select_root = select->root_instruction();
if (select_root->opcode() != HloOpcode::kCompare ||
select_root->operand(0)->opcode() != HloOpcode::kParameter ||
select_root->operand(1)->opcode() != HloOpcode::kParameter ||
select_root->operand(0)->parameter_number() +
select_root->operand(1)->parameter_number() !=
1) {
return DefaultAction(hlo);
}
float float_pad_value;
if (select_root->comparison_direction() == ComparisonDirection::kGe ||
select_root->comparison_direction() == ComparisonDirection::kGt) {
if (select_root->operand(0)->parameter_number() == 0) {
float_pad_value = -std::numeric_limits<float>::infinity();
} else {
float_pad_value = std::numeric_limits<float>::infinity();
}
} else if (select_root->comparison_direction() == ComparisonDirection::kLe ||
select_root->comparison_direction() == ComparisonDirection::kLt) {
if (select_root->operand(0)->parameter_number() == 0) {
float_pad_value = std::numeric_limits<float>::infinity();
} else {
float_pad_value = -std::numeric_limits<float>::infinity();
}
} else {
return DefaultAction(hlo);
}
auto pad_value = b_.AddInstruction(HloInstruction::CreateConstant(
hlo->shape().element_type() == BF16
? LiteralUtil::CreateR0<bfloat16>(
static_cast<bfloat16>(float_pad_value))
: LiteralUtil::CreateR0<float>(float_pad_value)));
auto replicated_init = GetPartitionedHlo(hlo->mutable_operand(2))
.Reshard(HloSharding::Replicate());
auto state = MakePartitioningState();
auto partition_ordinals =
MakeTiledPartitionOrdinals(hlo->sharding(), state.partition_id, &b_);
std::vector<MultiplyAddDivideOffsetCalculation> first_window(
hlo->shape().rank());
std::vector<MultiplyAddDivideOffsetCalculation> limit_window(
hlo->shape().rank());
std::vector<OffsetCalculation> data_left_halo_sizes(hlo->shape().rank());
std::vector<OffsetCalculation> data_right_halo_sizes(hlo->shape().rank());
std::vector<OffsetCalculation> source_left_halo_sizes(hlo->shape().rank());
std::vector<OffsetCalculation> source_right_halo_sizes(hlo->shape().rank());
auto unpadded_data_shard_shape =
MakePartitionedShape(hlo->shape(), hlo->sharding());
auto unpadded_source_shard_shape =
MakePartitionedShape(hlo->operand(1)->shape(), hlo->sharding());
auto source_shard_hlo = source.hlo();
auto data_shard_hlo = operand.hlo();
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
int64_t shard_count = hlo->sharding().tile_assignment().dim(i);
if (shard_count == 1) {
continue;
}
auto wd = hlo->window().dimensions(i);
if (wd.stride() > wd.size()) {
wd.set_size(wd.stride());
}
first_window[i] = MultiplyAddDivideOffsetCalculation(
unpadded_data_shard_shape.dimensions(i),
wd.padding_low() - wd.size() + wd.stride(), wd.stride());
limit_window[i] = MultiplyAddDivideOffsetCalculation(
unpadded_data_shard_shape.dimensions(i),
unpadded_data_shard_shape.dimensions(i) + wd.padding_low() +
wd.stride() - 1,
wd.stride());
source_left_halo_sizes[i] =
MultiplyAddDivideOffsetCalculation(
unpadded_source_shard_shape.dimensions(i), 0, 1) -
first_window[i];
source_right_halo_sizes[i] =
limit_window[i] - MultiplyAddDivideOffsetCalculation(
unpadded_source_shard_shape.dimensions(i),
unpadded_source_shard_shape.dimensions(i), 1);
data_left_halo_sizes[i] =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
unpadded_data_shard_shape.dimensions(i), wd.padding_low(), 1)) -
OffsetCalculation(
HloOpcode::kMultiply, first_window[i],
MultiplyAddDivideOffsetCalculation(0, wd.stride(), 1));
data_right_halo_sizes[i] =
OffsetCalculation(
HloOpcode::kMultiply, limit_window[i],
MultiplyAddDivideOffsetCalculation(0, wd.stride(), 1)) -
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
unpadded_data_shard_shape.dimensions(i),
unpadded_data_shard_shape.dimensions(i) + wd.stride() +
wd.padding_low() - wd.size(),
1));
int64_t max_windows =
(limit_window[i] - first_window[i]).MaxInRange(0, shard_count);
auto first_window_hlo =
first_window[i].Calculate(partition_ordinals[i], &b_);
auto resharded_source = ExchangeHaloAndGetValidData(
source_shard_hlo, source.base_shape(), source_left_halo_sizes[i],
source_right_halo_sizes[i], 0,
limit_window[i].Calculate(shard_count - 1), max_windows, i,
hlo->sharding(), first_window_hlo, replicated_init.hlo(),
partition_ordinals[i], collective_ops_creator_, next_channel_id_, &b_);
if (!resharded_source) {
return DefaultAction(hlo);
}
source_shard_hlo = *resharded_source;
auto offset_start_in_data =
MultiplyAddDivideOffsetCalculation(wd.stride(), 0, 1)
.Calculate(first_window_hlo, &b_);
int64_t padded_data_size =
(limit_window[i].Calculate(shard_count - 1) - 1) * wd.stride() +
wd.size();
int64_t data_shard_size = (max_windows - 1) * wd.stride() + wd.size();
auto resharded_data = ExchangeHaloAndGetValidData(
data_shard_hlo, operand.base_shape(), data_left_halo_sizes[i],
data_right_halo_sizes[i], wd.padding_low(), padded_data_size,
data_shard_size, i, hlo->sharding(), offset_start_in_data, pad_value,
partition_ordinals[i], collective_ops_creator_, next_channel_id_, &b_);
if (!resharded_data) {
return DefaultAction(hlo);
}
data_shard_hlo = *resharded_data;
}
Window window_on_shard = hlo->window();
for (int64_t i = 0; i < window_on_shard.dimensions_size(); ++i) {
int64_t shard_count = hlo->sharding().tile_assignment().dim(i);
if (shard_count == 1) {
continue;
}
auto reshard_wd = window_on_shard.mutable_dimensions(i);
reshard_wd->set_padding_low(0);
reshard_wd->set_padding_high(0);
}
auto sharded_select_and_scatter =
b_.AddInstruction(HloInstruction::CreateSelectAndScatter(
data_shard_hlo->shape(), data_shard_hlo, select, window_on_shard,
source_shard_hlo, replicated_init.hlo(),
hlo->called_computations()[1]));
SetPartitionedHlo(hlo, [&]() {
auto shard_shape = MakePartitionedShape(hlo->shape(), hlo->sharding());
if (ShapeUtil::Compatible(sharded_select_and_scatter->shape(),
shard_shape)) {
return sharded_select_and_scatter;
}
auto zero = b_.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
std::vector<HloInstruction*> slice_offsets(shard_shape.rank(), zero);
for (int64_t i = 0; i < window_on_shard.dimensions_size(); ++i) {
if (hlo->sharding().tile_assignment().dim(i) == 1) {
continue;
}
int64_t pad_low = hlo->window().dimensions(i).padding_low();
auto left_halo_size =
data_left_halo_sizes[i].Calculate(partition_ordinals[i], &b_);
if (data_left_halo_sizes[i].Calculate(0) == pad_low) {
slice_offsets[i] = left_halo_size;
} else {
auto is_shard0 = b_.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), zero, partition_ordinals[i],
ComparisonDirection::kEq));
auto pad_low_hlo = b_.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(pad_low)));
slice_offsets[i] = b_.AddInstruction(HloInstruction::CreateTernary(
zero->shape(), HloOpcode::kSelect, is_shard0, pad_low_hlo,
left_halo_size));
}
}
return b_.AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, sharded_select_and_scatter, slice_offsets,
shard_shape.dimensions()));
});
return absl::OkStatus();
}
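// Partitions a tuple by resharding each operand to the matching subsharding
// of the tuple's sharding and rebuilding the tuple.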
absl::Status SpmdPartitioningVisitor::HandleTuple(HloInstruction* hlo) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
new_operands.push_back(
GetPartitionedHlo(hlo->operand(i))
.Reshard(hlo->sharding().GetSubSharding(hlo->shape(), {i}))
.hlo());
}
SetPartitionedHlo(hlo, [&]() {
return b_.AddInstruction(HloInstruction::CreateTuple(new_operands));
});
return absl::OkStatus();
}
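// Partitions `computation`: visits every instruction, builds a new
// computation whose root is resharded to `root_sharding`, performs code
// motion for windowed dot-general loops, and swaps the new computation in
// for the original.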
absl::StatusOr<bool> SpmdPartitioningVisitor::DoPartition(
HloComputation* computation, const HloSharding& root_sharding,
const SpmdPartitionerOptions& options) {
VLOG(2) << "Partitioning computation " << computation->name() << " for "
<< num_replicas_ << " replicas and " << num_partitions_
<< " partitions";
TF_RETURN_IF_ERROR(computation->Accept(this));
HloModule* module = computation->parent();
auto new_root =
GetPartitionedHlo(computation->root_instruction()).Reshard(root_sharding);
auto new_computation =
module->AddEmbeddedComputation(b_.Build(new_root.hlo()));
TF_RETURN_IF_ERROR(
DoCodeMotionForWindowedDotGeneralLoops(new_computation, options));
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
replacement[computation] = new_computation;
module->ReplaceComputations(replacement);
return changed_;
}
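// PartitionId is only supported under manual sharding; otherwise its meaning
// after partitioning is ambiguous (replicated instruction vs. replicated
// data), so report Unimplemented.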
absl::Status SpmdPartitioningVisitor::HandlePartitionId(HloInstruction* hlo) {
if (hlo->has_sharding() && hlo->sharding().IsManual()) {
hlo->set_sharding(HloSharding::AssignDevice(0));
return DefaultAction(hlo);
}
return Unimplemented(
"PartitionId instruction is not supported for SPMD partitioning since "
"the meaning is ambiguous -- whether the instruction is replicated or "
"the data is replicated, and if the latter which data is replicated.");
}
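// Returns the default callbacks used to emit cross-partition collectives
// (partition-id, all-reduce, collective-permute, all-to-all, all-gather),
// expanding partition subgroups into replica groups across all replicas.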
SPMDCollectiveOpsCreator GetDefaultCollectiveOpsCreator(int64_t num_partitions,
int64_t num_replicas) {
return {
[](SpmdBuilder* b) {
return b->AddInstruction(HloInstruction::CreatePartitionId());
},
[num_replicas, num_partitions](
SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id) {
std::vector<ReplicaGroup> device_groups;
if (partition_subgroups.size() <= 1) {
device_groups.reserve(num_replicas);
for (int64_t rid = 0; rid < num_replicas; ++rid) {
device_groups.emplace_back();
for (int64_t pid = 0; pid < num_partitions; ++pid) {
device_groups.back().add_replica_ids(rid * num_partitions + pid);
}
}
} else {
device_groups.reserve(partition_subgroups.size() * num_replicas);
for (int64_t rid = 0; rid < num_replicas; ++rid) {
for (const auto& pgroup : partition_subgroups) {
device_groups.emplace_back();
for (int64_t pid : pgroup) {
device_groups.back().add_replica_ids(rid * num_partitions +
pid);
}
}
}
}
HloComputation* reduction_clone =
reduction->parent()->AddComputationAndUnifyNamesAndIds(
              reduction->Clone(), /*is_entry=*/false);
HloInstruction* all_reduce =
b->AddInstruction(HloInstruction::CreateAllReduce(
operand->shape(), {operand}, reduction_clone,
CollectiveDeviceList(device_groups),
                /*constrain_layout=*/false, channel_id,
                /*use_global_device_ids=*/true));
reduction_clone->SetCollectiveCallInstruction(all_reduce);
return all_reduce;
},
[num_replicas, num_partitions](
SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
const IotaReplicaGroupList& partition_group_list,
int64_t channel_id) {
HloComputation* reduction_clone =
reduction->parent()->AddComputationAndUnifyNamesAndIds(
                  reduction->Clone(), /*is_entry=*/false);
HloInstruction* all_reduce =
b->AddInstruction(HloInstruction::CreateAllReduce(
operand->shape(), {operand}, reduction_clone,
ExpandPartitionGroupListAcrossReplicas(
partition_group_list, num_replicas, num_partitions),
                    /*constrain_layout=*/false, channel_id,
                    /*use_global_device_ids=*/true));
reduction_clone->SetCollectiveCallInstruction(all_reduce);
return all_reduce;
},
[num_partitions](SpmdBuilder* b, HloInstruction* operand,
std::vector<std::pair<int64_t, int64_t>>& src_dst_pairs,
int64_t channel_id) {
if (src_dst_pairs.empty()) {
return CreateZero(operand->shape(), b);
} else {
bool is_copy =
src_dst_pairs.size() == num_partitions &&
absl::c_all_of(src_dst_pairs,
[](const std::pair<int64_t, int64_t>& pair) {
return pair.first == pair.second;
});
if (is_copy) {
return operand;
} else {
return b->AddInstruction(HloInstruction::CreateCollectivePermute(
operand->shape(), operand, src_dst_pairs, channel_id));
}
}
},
[](SpmdBuilder* b, absl::Span<HloInstruction* const> operands,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, std::optional<int64_t> split_dimension) {
std::vector<Shape> shapes(operands.size(), operands[0]->shape());
const Shape output_shape = (shapes.size() == 1)
? shapes[0]
: ShapeUtil::MakeTupleShape(shapes);
std::vector<ReplicaGroup> groups(partition_subgroups.size());
for (int64_t i = 0; i < groups.size(); ++i) {
for (int64_t id : partition_subgroups[i]) {
groups[i].add_replica_ids(id);
}
}
return b->AddInstruction(HloInstruction::CreateAllToAll(
output_shape, operands, CollectiveDeviceList(groups),
          /*constrain_layout=*/false, channel_id, split_dimension));
},
[num_replicas, num_partitions](
SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, int64_t all_gather_dimension) {
std::vector<ReplicaGroup> device_groups;
device_groups.reserve(partition_subgroups.size() * num_replicas);
for (int64_t i = 0; i < num_replicas; ++i) {
for (const auto& pgroup : partition_subgroups) {
device_groups.emplace_back();
for (int64_t pid : pgroup) {
device_groups.back().add_replica_ids(i * num_partitions + pid);
}
}
}
return b->AddInstruction(HloInstruction::CreateAllGather(
ag_shape, {operand}, all_gather_dimension,
CollectiveDeviceList(device_groups),
          /*constrain_layout=*/false, channel_id,
          /*use_global_device_ids=*/true));
},
[num_replicas, num_partitions](
SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
const IotaReplicaGroupList& partition_group_list, int64_t channel_id,
int64_t all_gather_dimension) {
return b->AddInstruction(HloInstruction::CreateAllGather(
ag_shape, {operand}, all_gather_dimension,
ExpandPartitionGroupListAcrossReplicas(
partition_group_list, num_replicas, num_partitions),
          /*constrain_layout=*/false, channel_id,
          /*use_global_device_ids=*/true));
}};
}
SpmdPartitioner::SpmdPartitioner(int64_t num_partitions, int64_t num_replicas,
SpmdPartitionerOptions options)
: SpmdPartitioner(
num_partitions, num_replicas, std::move(options),
GetDefaultCollectiveOpsCreator(num_partitions, num_replicas)) {}
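// All-gathers `operand` along `selected_dims` of `sharding`, emitting one
// all-gather per selected dimension, so those dimensions become unsharded.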
HloInstruction* SpmdPartitioner::AllGatherShards(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator) {
return AllGatherShardsInternal(b, operand, sharding, next_channel_id,
selected_dims, collectives_creator,
                                 /*per_dim_ag=*/true)
.first;
}
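// When `per_dim_ag` is set (or only one dimension is selected), emits one
// all-gather per selected dimension. Otherwise a single all-gather is issued
// on a flattened leading dimension and the original layout is restored with
// reshapes and a transpose. Returns the final value and the all-gather
// instruction (or nullptr when no dimension is selected).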
std::pair<HloInstruction*, HloInstruction*>
SpmdPartitioner::AllGatherShardsInternal(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator, bool per_dim_ag) {
if (selected_dims.empty()) {
return std::make_pair(operand, nullptr);
}
CHECK(!sharding.IsTileMaximal());
if (per_dim_ag || selected_dims.size() == 1) {
HloInstruction* result = operand;
Shape result_shape = operand->shape();
for (auto it = selected_dims.rbegin(); it != selected_dims.rend(); ++it) {
if (sharding.tile_assignment().dim(*it) == 1) {
continue;
}
auto partition_group_list = GetIotaPartitionGroupsForReplication(
sharding, {*it}, num_partitions_);
if (partition_group_list.has_value() &&
collectives_creator
.create_cross_partition_all_gather_with_iota_device_list) {
result_shape.set_dimensions(
*it, result_shape.dimensions(*it) *
partition_group_list.value().num_devices_per_group());
result = collectives_creator
.create_cross_partition_all_gather_with_iota_device_list(
b, result, result_shape, partition_group_list.value(),
(*next_channel_id)++,
                       /*all_gather_dimension=*/*it);
} else {
auto partition_subgroups =
GetPartitionGroupsForReplication(sharding, {*it});
result_shape.set_dimensions(
*it, result_shape.dimensions(*it) * partition_subgroups[0].size());
result = collectives_creator.create_cross_partition_all_gather(
b, result, result_shape, partition_subgroups, (*next_channel_id)++,
              /*all_gather_dimension=*/*it);
}
}
return std::make_pair(result, result);
}
std::vector<int64_t> shape;
shape.push_back(1);
for (int64_t dim : operand->shape().dimensions()) {
shape.push_back(dim);
}
auto reshape = b->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(operand->shape().element_type(), shape), operand));
HloInstruction* ag = nullptr;
HloInstruction* result = reshape;
auto partition_group_list = GetIotaPartitionGroupsForReplication(
sharding, selected_dims, num_partitions_);
if (partition_group_list.has_value() &&
collectives_creator
.create_cross_partition_all_gather_with_iota_device_list) {
shape[0] *= partition_group_list.value().num_devices_per_group();
result =
collectives_creator
.create_cross_partition_all_gather_with_iota_device_list(
b, result,
ShapeUtil::MakeShape(operand->shape().element_type(), shape),
partition_group_list.value(), (*next_channel_id)++,
                    /*all_gather_dimension=*/0);
} else {
auto partition_subgroups =
GetPartitionGroupsForReplication(sharding, selected_dims);
shape[0] *= partition_subgroups[0].size();
result = collectives_creator.create_cross_partition_all_gather(
b, result, ShapeUtil::MakeShape(operand->shape().element_type(), shape),
partition_subgroups, (*next_channel_id)++,
        /*all_gather_dimension=*/0);
}
ag = result;
std::vector<int64_t> tiled_dims;
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (sharding.tile_assignment().dim(i) > 1 &&
absl::c_linear_search(selected_dims, i)) {
tiled_dims.push_back(i);
}
}
if (tiled_dims.size() > 1) {
std::vector<int64_t> split_dim_shape;
split_dim_shape.reserve(tiled_dims.size() + operand->shape().rank());
for (int64_t i : tiled_dims) {
split_dim_shape.push_back(sharding.tile_assignment().dim(i));
}
for (int64_t dim : operand->shape().dimensions()) {
split_dim_shape.push_back(dim);
}
result = b->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(operand->shape().element_type(), split_dim_shape),
result));
}
std::vector<int64_t> xpose_permutation(result->shape().rank());
int64_t split_dims_added = 0;
for (int64_t i = 0; i < xpose_permutation.size(); ++i) {
if (sharding.tile_assignment().dim(i - split_dims_added) == 1 ||
!absl::c_linear_search(selected_dims, i - split_dims_added)) {
xpose_permutation[i] = i + tiled_dims.size() - split_dims_added;
} else {
xpose_permutation[i] = split_dims_added;
xpose_permutation[i + 1] = i + tiled_dims.size() - split_dims_added;
split_dims_added++;
i++;
}
}
result = b->AddInstruction(HloInstruction::CreateTranspose(
ShapeInference::InferTransposeShape(result->shape(), xpose_permutation)
.value(),
result, xpose_permutation));
auto ag_shape = operand->shape();
for (int64_t i : tiled_dims) {
ag_shape.set_dimensions(
i, ag_shape.dimensions(i) * sharding.tile_assignment().dim(i));
}
result = b->AddInstruction(HloInstruction::CreateReshape(ag_shape, result));
return std::make_pair(result, ag);
}
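// All-reduces `operand` across the partition groups implied by
// `selected_dims` of `sharding`, one all-reduce per selected dimension.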
HloInstruction* SpmdPartitioner::AllReduceAlongShardingDims(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator,
HloComputation* reduction) {
return AllReduceAlongShardingDimsInternal(
b, operand, sharding, next_channel_id, selected_dims, collectives_creator,
      reduction, /*per_dim_ar=*/true);
}
HloInstruction* SpmdPartitioner::AllReduceAlongShardingDimsInternal(
SpmdBuilder* b, HloInstruction* operand, const HloSharding& sharding,
int64_t* next_channel_id, absl::Span<const int64_t> selected_dims,
const SPMDCollectiveOpsCreator& collectives_creator,
HloComputation* reduction, bool per_dim_ar) {
if (!per_dim_ar) {
auto partition_group_list = GetIotaPartitionGroupsForReplication(
sharding, selected_dims, num_partitions_);
if (partition_group_list.has_value() &&
collectives_creator
.create_cross_partition_all_reduce_with_iota_device_list) {
return collectives_creator
.create_cross_partition_all_reduce_with_iota_device_list(
b, operand, reduction, partition_group_list.value(),
(*next_channel_id)++);
} else {
auto partition_subgroups =
GetPartitionGroupsForReplication(sharding, selected_dims);
return collectives_creator.create_cross_partition_all_reduce(
b, operand, reduction, partition_subgroups, (*next_channel_id)++);
}
}
auto result = operand;
for (auto it = selected_dims.rbegin(); it != selected_dims.rend(); ++it) {
if (sharding.tile_assignment().dim(*it) == 1) {
continue;
}
auto partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {*it}, num_partitions_);
if (partition_group_list.has_value() &&
collectives_creator
.create_cross_partition_all_reduce_with_iota_device_list) {
result = collectives_creator
.create_cross_partition_all_reduce_with_iota_device_list(
b, result, reduction, partition_group_list.value(),
(*next_channel_id)++);
} else {
auto partition_subgroups =
GetPartitionGroupsForReplication(sharding, {*it});
result = collectives_creator.create_cross_partition_all_reduce(
b, result, reduction, partition_subgroups, (*next_channel_id)++);
}
}
return result;
}
absl::StatusOr<bool> SpmdPartitioner::PartitionComputation(
HloComputation* computation, const HloSharding& root_sharding,
int64_t* next_channel_id, SpmdLogger* logger, const CallGraph& call_graph) {
auto visitor = CreateVisitor(computation, num_partitions_, num_replicas_,
collective_ops_creator_, next_channel_id, logger,
options_, call_graph);
return visitor->DoPartition(computation, root_sharding, options_);
}
std::unique_ptr<SpmdPartitioningVisitor> SpmdPartitioner::CreateVisitor(
HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdLogger* logger,
SpmdPartitionerOptions options, const CallGraph& call_graph) {
return std::make_unique<SpmdPartitioningVisitor>(
computation, num_partitions, num_replicas, collective_ops_creator,
next_channel_id, logger, std::move(options), this, call_graph);
}
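// Rough memory cost estimate: the byte size of all operands, plus the byte
// size of the output except for ops (all-reduce, dynamic-update-slice,
// scatter, while, tuple) whose output typically aliases an operand buffer.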
int64_t SpmdPartitioner::MemoryCostInBytes(HloInstruction* hlo) {
auto memory_cost_for_operands = [](HloInstruction* hlo) {
int64_t memory = 0;
for (const HloInstruction* operand : hlo->operands()) {
memory += ShapeSizeInBytes(operand->shape());
}
return memory;
};
switch (hlo->opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kScatter:
case HloOpcode::kWhile:
case HloOpcode::kTuple:
return memory_cost_for_operands(hlo);
default:
return memory_cost_for_operands(hlo) + ShapeSizeInBytes(hlo->shape());
}
}
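// Rough communication cost estimate for collectives: all-reduce counts the
// shape twice, collective-permute once, all-gather and all-to-all count the
// fraction of data that actually crosses partitions; other collectives
// return 0.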
int64_t SpmdPartitioner::CommunicationCostInBytes(HloInstruction* hlo) {
CHECK(IsCollective(hlo));
switch (hlo->opcode()) {
case HloOpcode::kAllReduce:
return ShapeSizeInBytes(hlo->shape()) * 2;
case HloOpcode::kCollectivePermute:
return ShapeSizeInBytes(hlo->shape());
case HloOpcode::kAllGather: {
HloAllGatherInstruction* ag = Cast<HloAllGatherInstruction>(hlo);
int64_t group_size =
ag->shape().dimensions(ag->all_gather_dimension()) /
ag->operand(0)->shape().dimensions(ag->all_gather_dimension());
return ShapeSizeInBytes(hlo->shape()) * (group_size - 1) / group_size;
}
case HloOpcode::kAllToAll: {
int64_t group_size;
if (!hlo->replica_groups().empty()) {
group_size = hlo->replica_groups()[0].replica_ids_size();
} else {
group_size = hlo->channel_id() ? num_partitions_ : num_replicas_;
}
return ShapeSizeInBytes(hlo->shape()) * (group_size - 1) / group_size;
}
default:
return 0;
}
}
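// Pass entry point: preprocesses shardings and HLOs, records entry
// parameter/output shardings on the module, partitions the entry
// computation, restores or updates the entry computation layout, runs
// cleanup passes (DCE, tuple simplification, CSE, call-graph flattening),
// and finally strips sharding attributes.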
absl::StatusOr<bool> SpmdPartitioner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
set_execution_threads(execution_threads);
TF_RETURN_IF_ERROR(PreprocessSharding(module, execution_threads));
TF_RETURN_IF_ERROR(PreprocessHlos(module, execution_threads));
XLA_VLOG_LINES(1, SpmdLogger::ReportBeforePartition(
*module, options_.report_instruction_count));
std::vector<HloSharding> entry_params_shardings;
const auto num_parameters = module->entry_computation()->num_parameters();
entry_params_shardings.reserve(num_parameters);
for (int64_t i = 0; i < num_parameters; ++i) {
auto param = module->entry_computation()->parameter_instruction(i);
CHECK(param->has_sharding()) << "Missing sharding in entry parameter " << i;
entry_params_shardings.push_back(param->sharding());
}
module->set_spmd_parameters_shardings(entry_params_shardings);
auto entry_root = module->entry_computation()->root_instruction();
CHECK(entry_root->has_sharding()) << "Missing sharding in entry root.";
module->set_spmd_output_sharding(entry_root->sharding());
FlattenCallGraph flatten;
TF_ASSIGN_OR_RETURN(auto changed, flatten.Run(module));
SpmdLogger logger(options_.report_instruction_count,
                    /*disabled=*/!VLOG_IS_ON(1));
auto program_shape = module->entry_computation()->ComputeProgramShape();
int64_t next_channel_id = hlo_query::NextChannelId(*module);
HloSharding root_sharding = entry_root->sharding();
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
CHECK(call_graph->IsFlattened());
TF_ASSIGN_OR_RETURN(
bool partition_changed,
PartitionComputation(module->entry_computation(), root_sharding,
&next_channel_id, &logger, *call_graph));
changed |= partition_changed;
auto new_program_shape = module->entry_computation()->ComputeProgramShape();
if (!options_.allow_module_signature_change) {
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
program_shape.result(), new_program_shape.result()))
<< "Result shape changed for the entry computation";
TF_RET_CHECK(program_shape.parameters_size() ==
new_program_shape.parameters_size())
<< "Parameter count changed for the entry computation";
for (int64_t i = 0; i < program_shape.parameters_size(); ++i) {
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
program_shape.parameters(i), new_program_shape.parameters(i)))
<< "Parameter shape changed for the entry computation";
}
} else {
auto update_shape = [this](Shape* subshape, const xla::ShapeIndex& index) {
if (subshape->IsArray() && subshape->has_layout()) {
UpdateLayout(subshape);
}
};
const auto& old_entry_layout = module->entry_computation_layout();
for (int64_t i = 0; i < new_program_shape.parameters_size(); ++i) {
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
old_entry_layout.parameter_shape(i),
new_program_shape.mutable_parameters(i)));
ShapeUtil::ForEachMutableSubshape(new_program_shape.mutable_parameters(i),
update_shape);
}
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
old_entry_layout.result_shape(), new_program_shape.mutable_result()));
ShapeUtil::ForEachMutableSubshape(new_program_shape.mutable_result(),
update_shape);
HloModuleConfig config = module->config();
*config.mutable_entry_computation_layout() =
        ComputationLayout(new_program_shape, /*ignore_layouts=*/false);
module->set_config(config);
}
XLA_VLOG_LINES(1, SpmdLogger::ReportAfterPartition(
*module, options_.report_instruction_count));
XLA_VLOG_LINES(1, logger.MakeReport());
if (changed) {
HloPassPipeline pass("spmd-cleanup");
    pass.AddPass<HloDCE>(/*remove_cross_partition_collective_ops=*/true);
    pass.AddPass<TupleSimplifier>();
    pass.AddPass<HloDCE>(/*remove_cross_partition_collective_ops=*/true);
    pass.AddPass<HloCSE>(/*is_layout_sensitive=*/false);
pass.AddPass<FlattenCallGraph>();
TF_RETURN_IF_ERROR(pass.Run(module, execution_threads).status());
}
TF_RETURN_IF_ERROR(ClearShardingAttributes(module, execution_threads));
return changed;
}
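// Validates and normalizes shardings before partitioning: side-effecting ops
// must carry a sharding (and not a replicated one, unless allowed), unsharded
// Rng ops are pinned to device 0, and any other unsharded op defaults to
// replicated. When the module signature may not change, entry parameters must
// be replicated or on a single device, and the root must be replicated,
// manual, or on a single device.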
absl::Status SpmdPartitioner::PreprocessSharding(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->HasSideEffectNoRecurse() && hlo->opcode() != HloOpcode::kRng &&
(hlo->opcode() != HloOpcode::kCustomCall ||
GetCustomCallPartitioner(hlo->custom_call_target()) == nullptr)) {
TF_RET_CHECK(hlo->has_sharding())
<< "Side-effect HLO must have sharding: " << hlo->ToString();
TF_RET_CHECK(!HasReplicatedSharding(hlo->sharding()) ||
CanSideEffectingHaveReplicatedSharding(hlo))
<< "side-effect HLO cannot have a replicated sharding: "
<< hlo->ToString();
}
if (!hlo->has_sharding()) {
if (hlo->opcode() == HloOpcode::kRng) {
hlo->set_sharding(HloSharding::AssignDevice(0));
} else {
hlo->set_sharding(
HloSharding::Single(hlo->shape(), HloSharding::Replicate()));
}
}
}
}
if (!options_.allow_module_signature_change) {
const HloComputation* entry = module->entry_computation();
TF_RET_CHECK(entry->root_instruction()->has_sharding());
const HloSharding& root_sharding = entry->root_instruction()->sharding();
if (!root_sharding.UniqueDevice().has_value()) {
if (root_sharding.IsTuple()) {
TF_RET_CHECK(absl::c_all_of(root_sharding.tuple_elements(),
[](const HloSharding& s) {
return s.IsReplicated() || s.IsManual();
}))
<< "Unsupported entry root sharding: " << root_sharding.ToString();
} else {
TF_RET_CHECK(root_sharding.IsReplicated() || root_sharding.IsManual())
<< "Unsupported entry root sharding: " << root_sharding.ToString();
}
}
for (const HloInstruction* param : entry->parameter_instructions()) {
TF_RET_CHECK(param->has_sharding());
TF_RET_CHECK(param->sharding().IsReplicated() ||
param->sharding().UniqueDevice().has_value())
<< "Unsupported entry parameter sharding:"
<< param->sharding().ToString();
}
}
return absl::OkStatus();
}
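// Rewrites patterns that would partition poorly: slice(pad(x)) pairs are
// merged into a single pad, and concatenates that implement rotate or
// pad-with-wrap patterns are turned into the internal SPMD rotate-right
// custom call combined with pads and selects.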
absl::Status SpmdPartitioner::PreprocessHlos(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto skip_copy_operands = [](HloInstruction* operand,
bool check_single_use =
true) -> HloInstruction* {
while (operand->user_count() == 1 &&
operand->opcode() == HloOpcode::kCopy) {
operand = operand->mutable_operand(0);
}
if (check_single_use && operand->user_count() != 1) {
return nullptr;
}
return operand;
};
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (hlo->sharding().IsTileMaximal() || hlo->sharding().IsManual()) {
continue;
}
if (hlo->opcode() == HloOpcode::kSlice) {
HloInstruction* operand = skip_copy_operands(hlo->mutable_operand(0));
if (operand == nullptr || operand->sharding() != hlo->sharding()) {
continue;
}
if (operand->opcode() == HloOpcode::kPad) {
std::optional<PaddingConfig> merged_padding =
operand->padding_config();
bool may_have_multi_halo_exchanges = false;
for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
const auto& dim = operand->padding_config().dimensions(i);
if (dim.interior_padding() != 0 || hlo->slice_strides(i) != 1) {
merged_padding = std::nullopt;
break;
}
if (hlo->sharding().tile_assignment().dim(i) != 1 &&
(dim.edge_padding_low() != 0 || dim.edge_padding_high() != 0) &&
hlo->shape().dimensions(i) != operand->shape().dimensions(i)) {
may_have_multi_halo_exchanges = true;
}
auto* merged_dim = merged_padding->mutable_dimensions(i);
merged_dim->set_edge_padding_low(dim.edge_padding_low() -
hlo->slice_starts(i));
merged_dim->set_edge_padding_high(hlo->slice_limits(i) -
operand->shape().dimensions(i));
}
if (merged_padding.has_value() && may_have_multi_halo_exchanges) {
HloInstruction* new_pad =
computation->AddInstruction(HloInstruction::CreatePad(
hlo->shape(), operand->mutable_operand(0),
operand->mutable_operand(1), *merged_padding));
new_pad->set_metadata(operand->metadata());
new_pad->set_sharding(hlo->sharding());
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_pad));
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(hlo));
}
}
}
if (hlo->opcode() == HloOpcode::kConcatenate) {
const int64_t dim = hlo->concatenate_dimension();
if (hlo->sharding().tile_assignment().dim(dim) == 1) {
continue;
}
if (hlo->operand_count() == 2) {
HloInstruction* lhs = skip_copy_operands(hlo->mutable_operand(0));
HloInstruction* rhs = skip_copy_operands(hlo->mutable_operand(1));
if (lhs == nullptr || rhs == nullptr) {
continue;
}
const int64_t amount = FindRotateRightPattern(hlo, lhs, rhs);
if (amount < 0) {
continue;
}
TF_RETURN_IF_ERROR(HandleRotateRightWhilePreprocessing(computation));
HloInstruction* to_rotate = lhs->mutable_operand(0);
HloInstruction* rotate = computation->AddInstruction(
CreateCustomCallSPMDInternal_RotateRight(to_rotate, dim, amount));
rotate->set_metadata(hlo->metadata());
rotate->set_sharding(hlo->sharding());
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rotate));
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(hlo));
} else if (hlo->operand_count() == 3) {
HloInstruction* lhs = skip_copy_operands(hlo->mutable_operand(0));
HloInstruction* mid = skip_copy_operands(hlo->mutable_operand(1),
                                               /*check_single_use=*/false);
HloInstruction* rhs = skip_copy_operands(hlo->mutable_operand(2));
std::optional<PadWithWrapPattern> pad_pattern =
FindPadWithWrapPattern(hlo, lhs, mid, rhs);
if (!pad_pattern) {
continue;
}
PaddingConfig padding_config =
MakeNoPaddingConfig(hlo->shape().rank());
auto* padding_config_dim = padding_config.mutable_dimensions(dim);
const int64_t low_pad = lhs->shape().dimensions(dim);
const int64_t high_pad = rhs->shape().dimensions(dim);
padding_config_dim->set_edge_padding_low(low_pad);
padding_config_dim->set_edge_padding_high(high_pad);
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(hlo->shape().element_type())));
zero->set_sharding(HloSharding::Replicate());
HloInstruction* pad =
computation->AddInstruction(HloInstruction::CreatePad(
hlo->shape(), mid, zero, padding_config));
pad->set_metadata(hlo->metadata());
pad->set_sharding(hlo->sharding());
const int64_t padded_size = hlo->shape().dimensions(dim);
const int rotate_lhs_amount =
padded_size - (pad_pattern->lhs_slice_start + low_pad);
HloInstruction* rotate_lhs = computation->AddInstruction(
CreateCustomCallSPMDInternal_RotateRight(pad, dim,
rotate_lhs_amount));
rotate_lhs->set_metadata(hlo->metadata());
rotate_lhs->set_sharding(hlo->sharding());
auto apply_modifiers =
[&](HloInstruction* inst,
const std::vector<const HloInstruction*>& modifiers) {
for (auto it = modifiers.crbegin(), end = modifiers.crend();
it != end; ++it) {
const HloInstruction* modifier = *it;
Shape new_shape = ShapeUtil::ChangeElementType(
inst->shape(), modifier->shape().element_type());
inst = computation->AddInstruction(
modifier->CloneWithNewOperands(new_shape, {inst}));
}
return inst;
};
rotate_lhs = apply_modifiers(rotate_lhs, pad_pattern->lhs_modifiers);
const int64_t rotate_rhs_amount =
padded_size - (pad_pattern->rhs_slice_start + low_pad + high_pad);
HloInstruction* rotate_rhs = computation->AddInstruction(
CreateCustomCallSPMDInternal_RotateRight(pad, dim,
rotate_rhs_amount));
rotate_rhs->set_metadata(hlo->metadata());
rotate_rhs->set_sharding(hlo->sharding());
rotate_rhs = apply_modifiers(rotate_rhs, pad_pattern->rhs_modifiers);
const Shape iota_shape =
ShapeUtil::ChangeElementType(hlo->shape(), U32);
HloInstruction* iota = computation->AddInstruction(
HloInstruction::CreateIota(iota_shape, dim));
iota->set_metadata(hlo->metadata());
iota->set_sharding(hlo->sharding());
struct SelectSpec {
int64_t limit;
HloInstruction* hlo;
Comparison::Direction cmp;
};
const std::array<SelectSpec, 2> selects = {
{
{low_pad, rotate_lhs, Comparison::Direction::kLt},
{padded_size - high_pad, rotate_rhs,
Comparison::Direction::kGe}}};
Shape pred_shape = ShapeUtil::ChangeElementType(hlo->shape(), PRED);
HloInstruction* merged = pad;
for (const SelectSpec& select_spec : selects) {
HloInstruction* limit =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<uint32_t>(select_spec.limit)));
limit->set_sharding(HloSharding::Replicate());
HloInstruction* limit_bcast = computation->AddInstruction(
HloInstruction::CreateBroadcast(iota_shape, limit, {}));
limit_bcast->set_metadata(hlo->metadata());
limit_bcast->set_sharding(hlo->sharding());
HloInstruction* compare =
computation->AddInstruction(HloInstruction::CreateCompare(
pred_shape, iota, limit_bcast, select_spec.cmp));
compare->set_metadata(hlo->metadata());
compare->set_sharding(hlo->sharding());
merged = computation->AddInstruction(HloInstruction::CreateTernary(
hlo->shape(), HloOpcode::kSelect, compare, select_spec.hlo,
merged));
merged->set_metadata(hlo->metadata());
merged->set_sharding(hlo->sharding());
}
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(merged));
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(hlo));
}
}
}
}
return absl::OkStatus();
}
}
}

#include "xla/service/spmd/spmd_partitioner.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/sharding_format_picker.h"
#include "xla/service/spmd/spmd_prepare.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
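// Runs the SPMD partitioner over an HLO string for a given device count: the
// input shardings are optionally rewritten into the parameterized format
// (V1 or best-effort V2), then SpmdPrepare and SpmdPartitioner execute
// between HloVerifier passes, with the default all-gather creator disabled.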
class SpmdPartitioningTest
: public HloTestBase,
public ::testing::WithParamInterface<ShardingFormatPicker::ShardingType> {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
absl::string_view hlo_module, int64_t num_devices,
bool conv_halo_exchange_always_on_lhs = true,
bool choose_faster_windowed_einsum = false,
bool unroll_windowed_einsum = false,
bool bidirectional_windowed_einsum = false,
int64_t threshold_for_windowed_einsum_mib = -1,
PartitioningMethod gather_method = PartitioningMethod::kIndexParallel,
PartitioningMethod scatter_method = PartitioningMethod::kIndexParallel) {
SpmdPartitionerOptions options;
options.conv_halo_exchange_always_on_lhs = conv_halo_exchange_always_on_lhs;
options.allow_module_signature_change = true;
options.choose_faster_windowed_einsum_over_mem =
choose_faster_windowed_einsum;
options.unroll_windowed_einsum = unroll_windowed_einsum;
options.bidirectional_windowed_einsum = bidirectional_windowed_einsum;
if (threshold_for_windowed_einsum_mib >= 0) {
options.threshold_for_windowed_einsum_mib =
threshold_for_windowed_einsum_mib;
}
options.gather_partition_method = gather_method;
options.scatter_partition_method = scatter_method;
auto collective_ops_creator =
        GetDefaultCollectiveOpsCreator(num_devices, /*num_replicas=*/1);
collective_ops_creator.create_cross_partition_all_gather = nullptr;
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(num_devices);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
ShardingFormatPicker format_picker(GetParam());
TF_ASSIGN_OR_RETURN(bool changed, format_picker.Run(module.get()));
if (changed) {
VLOG(1) << "Sharding format changed: "
<< module->ToString(HloPrintOptions()
.set_print_program_shape(false)
.set_print_operand_shape(false));
}
HloPassPipeline pass("spmd-partitioning");
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
pass.AddPass<SpmdPrepare>();
    pass.AddPass<SpmdPartitioner>(num_devices, /*num_replicas=*/1, options,
collective_ops_creator);
    pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                              /*allow_mixed_precision=*/false);
TF_RETURN_IF_ERROR(pass.Run(module.get()).status());
VerifyNoShardingOnCollectives(module.get());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
void VerifyNoShardingOnCollectives(HloModule* module) {
for (const HloComputation* c : module->computations()) {
for (const HloInstruction* inst : c->instructions()) {
if (!absl::c_linear_search(
std::vector<HloOpcode>{
HloOpcode::kAllToAll, HloOpcode::kAllReduce,
HloOpcode::kAllGather, HloOpcode::kCollectivePermute,
HloOpcode::kReduceScatter},
inst->opcode())) {
continue;
}
EXPECT_FALSE(inst->has_sharding());
}
}
}
};
std::string TestParamToString(
const ::testing::TestParamInfo<ShardingFormatPicker::ShardingType>& data) {
switch (data.param) {
case ShardingFormatPicker::ShardingType::kV1:
return "V1";
case ShardingFormatPicker::ShardingType::kBestEffortV2:
return "BestEffortV2";
}
}
INSTANTIATE_TEST_SUITE_P(
All, SpmdPartitioningTest,
::testing::Values(ShardingFormatPicker::ShardingType::kV1,
ShardingFormatPicker::ShardingType::kBestEffortV2),
TestParamToString);
TEST_P(SpmdPartitioningTest, SingleDeviceToReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
op::Select(op::Broadcast(op::Compare()),
op::Constant(), op::Broadcast()))),
op::Shape("s32[2,3]")));
}
TEST_P(SpmdPartitioningTest, SingleDeviceCustomCall) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
%cc = s32[2,3] custom-call(%constant), custom_call_target="SomeCustomCall",
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%cc), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* custom_call = FindInstruction(module.get(), "cc.1");
EXPECT_NE(custom_call, nullptr);
EXPECT_NE(custom_call->parent(), module->entry_computation());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(
op::Select(op::Broadcast(op::Compare()),
op::Conditional(), op::Broadcast()))),
op::Shape("s32[2,3]")));
}
TEST_P(SpmdPartitioningTest, SingleDeviceToSingleDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
EXPECT_THAT(root, op::Copy(AllOf(op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare()),
op::Constant(), op::Broadcast()))),
op::Shape("s32[2,3]"))));
}
TEST_P(SpmdPartitioningTest, SingleDeviceToTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={maximal device=0}
ROOT %copy = s32[2,3]{1,0} copy(%constant),
sharding={devices=[2,1]1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(
op::Copy(op::DynamicSlice(
op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::Constant(), op::Broadcast())),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant())),
op::Shape("s32[1,3]")));
}
TEST_P(SpmdPartitioningTest, PartitionCall) {
absl::string_view hlo_string = R"(
HloModule jit_f
g {
Arg_0.6 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
constant.0 = s32[] constant(2), sharding={replicated}
broadcast.0 = s32[8,2]{1,0} broadcast(constant.0), dimensions={}, sharding={devices=[2,2]<=[4]}
ROOT multiply.9 = s32[8,2]{1,0} multiply(Arg_0.6, broadcast.0), sharding={devices=[2,2]<=[4]}
}
ENTRY main {
Arg_0.1 = s32[8,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
constant.1 = s32[] constant(3), sharding={replicated}
broadcast.1 = s32[8,2]{1,0} broadcast(constant.1), dimensions={}, sharding={devices=[2,2]<=[4]}
multiply.4 = s32[8,2]{1,0} multiply(Arg_0.1, broadcast.1), sharding={devices=[2,2]<=[4]}
ROOT call = s32[8,2]{1,0} call(multiply.4), to_apply=g, sharding={devices=[2,2]<=[4]}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Call(), op::Shape("s32[4,1]")));
HloInstruction* call_comp_root =
root->called_computations()[0]->root_instruction();
EXPECT_THAT(call_comp_root, AllOf(op::Multiply(op::Parameter(0),
op::Broadcast(op::Constant())),
op::Shape("s32[4,1]")));
}
TEST_P(SpmdPartitioningTest, TiledToReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::AllReduce(AllOf(
op::DynamicUpdateSlice(
op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()),
op::Shape("s32[2,3]")))));
}
TEST_P(SpmdPartitioningTest,
TiledToReplicatedWhenV2ShardingGeneratesReplicaGroupV2) {
if (GetParam() != ShardingFormatPicker::ShardingType::kBestEffortV2) {
GTEST_SKIP() << "This test only runs when input sharding is in V2 format.";
}
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[4,1]<=[4]}
ROOT %copy = s32[4,3]{1,0} copy(%constant), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto all_reduce_instruction =
std::find_if(module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
EXPECT_NE(all_reduce_instruction,
module->entry_computation()->instructions().end());
EXPECT_TRUE((*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.has_value());
IotaReplicaGroupList list = (*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.value();
EXPECT_EQ(list.num_replica_groups(), 1);
EXPECT_EQ(list.num_devices_per_group(), 4);
EXPECT_THAT(list.reshape_dims(), ::testing::ElementsAre(4));
EXPECT_THAT(list.transpose_perm(), ::testing::ElementsAre(0));
}
TEST_P(SpmdPartitioningTest, TiledToSingleDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant = s32[2,3]{1,0} constant({{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT %copy = s32[2,3]{1,0} copy(%constant), sharding={maximal device=0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::Copy(op::AllReduce(AllOf(
op::DynamicUpdateSlice(
op::Broadcast(), AllOf(op::Constant(), op::Shape("s32[1,3]")),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()),
op::Shape("s32[2,3]"))))));
}
TEST_P(SpmdPartitioningTest, TiledToTiledEven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param= s32[8,2]{1,0} parameter(0), sharding={devices=[2,1]0,1}
ROOT %copy = s32[8,2]{1,0} copy(%param), sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Copy(op::Reshape(op::Transpose(op::AllToAll(AllOf(
op::Reshape(op::Parameter()), op::Shape("s32[4,2,1]")))))),
op::Shape("s32[8,1]")));
}
TEST_P(SpmdPartitioningTest, TiledToTiledUneven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param= f32[7,31,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
ROOT %copy = f32[7,31,128]{2,1,0} copy(%param), sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Copy(op::Slice(op::Reshape(AllOf(op::Transpose(op::AllToAll(
op::Reshape(AllOf(op::Pad(), op::Shape("f32[8,16,128]")))))))))));
}
TEST_P(SpmdPartitioningTest, GetTupleElementSwapDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param.0 = (f32[2,3]{1,0}, u32[]) parameter(0),
sharding={{maximal device=1}, {maximal device=1}}
%gte.0 = f32[2,3]{1,0} get-tuple-element(%param.0), index=0,
sharding={maximal device=0}
%gte.1 = u32[] get-tuple-element(%param.0), index=1,
sharding={maximal device=0}
ROOT %tuple = (f32[2,3]{1,0}, u32[]) tuple(%gte.0, %gte.1),
sharding={{maximal device=0},{maximal device=0}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, op::Tuple());
EXPECT_THAT(root->operand(0),
op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::GetTupleElement(op::Parameter()), op::Broadcast()))));
EXPECT_THAT(root->operand(1),
op::Copy(op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::GetTupleElement(op::Parameter()), op::Broadcast()))));
}
TEST_P(SpmdPartitioningTest, GetTupleElementTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param.0 = (f32[2,3]{1,0}, u32[2,3]{1,0}) parameter(0),
sharding={{replicated}, {replicated}}
gte.0 = f32[2,3]{1,0} get-tuple-element(param.0), index=0,
sharding={devices=[2,1]0,1}
gte.1 = u32[2,3]{1,0} get-tuple-element(param.0), index=1,
sharding={devices=[2,1]0,1}
ROOT %tuple = (f32[2,3]{1,0}, u32[2,3]{1,0}) tuple(gte.0, gte.1),
sharding={{devices=[2,1]0,1},{devices=[2,1]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, op::Tuple());
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
EXPECT_THAT(root->operand(0),
op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
op::Constant()));
EXPECT_THAT(root->operand(1),
op::DynamicSlice(op::GetTupleElement(op::Parameter()), offset,
op::Constant()));
}
TEST_P(SpmdPartitioningTest, TiledInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = (f32[8,2]{1,0}, token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {maximal device=0}}
ROOT infeed.data = f32[8,2]{1,0} get-tuple-element(infeed), index=0,
sharding={maximal device=0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(),
op::GetTupleElement(
AllOf(op::Infeed(), op::Shape("(f32[4,2]{1,0}, token[])"))),
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Constant()))));
}
TEST_P(SpmdPartitioningTest, UnevenTiledInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = (f32[9,2]{1,0}, token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {maximal device=0}}
ROOT infeed.data = f32[9,2]{1,0} get-tuple-element(infeed), index=0,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("f32[5,2]"), op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()),
op::AfterAll(), op::AfterAll()))));
EXPECT_THAT(
root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("(f32[5,2], token[])"), op::Infeed(op::Parameter())));
auto second_infeed =
AllOf(op::Shape("(f32[4,2], token[])"), op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("(f32[5,2], token[])"),
op::Tuple(op::Pad(op::GetTupleElement(second_infeed),
op::Constant()),
op::GetTupleElement(second_infeed))));
}
TEST_P(SpmdPartitioningTest, UnevenTiledTupleInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
sharding={{devices=[2,1]0,1}, {replicated}, {maximal device=0}}
ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
index=0, sharding={{devices=[2,1]0,1}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("(f32[5,2], f32[2])"),
op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()), op::AfterAll(),
op::AfterAll()))));
EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
op::Infeed(op::Parameter())));
auto second_infeed = AllOf(op::Shape("((f32[4,2], f32[2]), token[])"),
op::Infeed(op::Parameter()));
EXPECT_THAT(
root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("((f32[5,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::Pad(op::GetTupleElement(
op::GetTupleElement(second_infeed)),
op::Constant()),
op::GetTupleElement(
op::GetTupleElement(second_infeed))),
op::GetTupleElement(second_infeed))));
}
TEST_P(SpmdPartitioningTest, MixedTupleInfeed) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = ((f32[9,2]{1,0}, f32[2]{0}), token[]) infeed(token0),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
ROOT infeed.data = (f32[9,2]{1,0}, f32[2]{0}) get-tuple-element(infeed),
index=0, sharding={{maximal device=0}, {maximal device=1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("(f32[9,2], f32[2])"),
op::GetTupleElement(op::Conditional(
op::Convert(op::PartitionId()), op::AfterAll(),
op::AfterAll()))));
auto first_infeed = AllOf(op::Shape("((f32[9,2], ()), token[])"),
op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[0]->root_instruction(),
AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::GetTupleElement(
op::GetTupleElement(first_infeed)),
op::Broadcast(op::Constant())),
op::GetTupleElement(first_infeed))));
auto second_infeed =
AllOf(op::Shape("(((), f32[2]), token[])"), op::Infeed(op::Parameter()));
EXPECT_THAT(root->operand(0)->called_computations()[1]->root_instruction(),
AllOf(op::Shape("((f32[9,2], f32[2]), token[])"),
op::Tuple(op::Tuple(op::Broadcast(op::Constant()),
op::GetTupleElement(op::GetTupleElement(
second_infeed))),
op::GetTupleElement(second_infeed))));
}
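// Reducing a row-tiled operand to a replicated scalar: each partition masks
// out the padded rows, reduces its local shard, and the partial results are
// combined with an all-reduce.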
TEST_P(SpmdPartitioningTest, TiledToReplicatedReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce = f32[] reduce(constant, constant.1), dimensions={0,1},
to_apply=sum, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::AllReduce(op::Reduce(
op::Select(
op::Compare(op::Add(op::Iota(), op::Broadcast(op::Reshape())),
op::Broadcast(op::Constant())),
AllOf(op::Shape("f32[2,3]{1,0}"),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant())),
op::Broadcast(op::Constant())),
op::Constant())));
}
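// Elementwise ops are applied to local shards: each operand constant is
// dynamic-sliced down to the f32[2,3] shard before the multiply and add.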
TEST_P(SpmdPartitioningTest, TiledElementwise) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[3,3]{1,0} constant({{2,2,2},{2,2,2},{2,2,2}}),
sharding={replicated}
multiply = f32[3,3]{1,0} multiply(constant, constant.1),
sharding={devices=[2,1]0,1}
ROOT add = f32[3,3]{1,0} add(multiply, constant.1),
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(
op::Shape("f32[2,3]{1,0}"),
op::Add(op::Multiply(
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant()),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant())),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant()))));
}
TEST_P(SpmdPartitioningTest, TiledAllReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
parameter = f32[3,3]{1,0} parameter(0), sharding={devices=[2,1]0,1}
ROOT all-reduce = f32[3,3]{1,0} all-reduce(parameter), to_apply=sum,
replica_groups={}, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("f32[2,3]{1,0}"), op::AllReduce(op::Parameter(0))));
}
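// When only the broadcast's new dimensions are sharded, the replicated input
// is broadcast directly to the local shard shape.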
TEST_P(SpmdPartitioningTest, BroadcastOnlyNewDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[3,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,4,3]{2,1,0}"),
op::Broadcast(op::Constant())));
}
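// When a sharded output dimension maps to an input dimension, the input is
// dynamic-sliced first and then broadcast.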
TEST_P(SpmdPartitioningTest, BroadcastOnlyOldDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[4,2,3]{2,1,0}"),
op::Broadcast(op::DynamicSlice(
op::Constant(), op::Reshape(), op::Constant()))));
}
TEST_P(SpmdPartitioningTest, BroadcastBothOldAndNewDimsSharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1},{1,1,1}}),
sharding={replicated}
ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Shape("f32[2,2,3]{2,1,0}"),
op::Broadcast(AllOf(op::Shape("f32[2,3]{1,0}"),
op::DynamicSlice(op::Constant(), op::Reshape(),
op::Constant())))));
}
TEST_P(SpmdPartitioningTest,
BroadcastBothOldAndNewDimsShardedPartiallySharded) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %entry {
%param = f32[4,3]{1,0} parameter(0),
sharding={devices=[1,2,4]<=[2,2,2]T(1,0,2) last_tile_dim_replicate}
ROOT %broadcast = f32[4,4,3]{2,1,0} broadcast(%param), dimensions={1,2},
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Shape("f32[2,4,2]"),
op::Broadcast(AllOf(op::Shape("f32[4,2]"), op::Parameter(0)))));
}
TEST_P(SpmdPartitioningTest,
ConvWithParallelDimAndNonParallelSpatialDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,12,12,24,32] parameter(0)
%lhs.copy = f32[32,12,12,24,32] copy(%lhs),
sharding={devices=[2,2,1,1,1]<=[4]}
%rhs = f32[32,6,6,16,32] parameter(1)
%rhs.copy = f32[32,6,6,16,32] copy(%rhs),
sharding={devices=[2,2,1,1,1]<=[4]}
ROOT %conv = f32[32,7,7,24,16] convolution(%lhs.copy, %rhs.copy),
dim_labels=012bf_012oi->012bf,
window={size=32x6x6 stride=31x1x1 lhs_dilate=32x1x1},
sharding={devices=[2,2,1,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())),
op::Shape("f32[16,6,12,24,32]"));
const auto rhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())),
op::Shape("f32[16,3,6,16,32]"));
auto resharded_rhs =
AllOf(op::Shape("f32[16,6,6,16,32]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), rhs, op::Constant(), op::Reshape(),
op::Constant(), op::Constant(), op::Constant())));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[16,2,12,24,32]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[16,3,12,24,32]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(op::Compare(),
op::DynamicSlice(
op::Concatenate(left_halo, lhs, right_halo),
op::Constant(), op::Add(), op::Constant(),
op::Constant(), op::Constant()),
op::Broadcast()),
resharded_rhs),
op::Shape("f32[16,4,7,24,16]")));
}
TEST_P(SpmdPartitioningTest, BroadcastPropagateTiledSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[4,3]{1,0} constant({{1,1,1},{1,4,1},{1,3,1},{1,2,1}}),
sharding={devices=[2,1]0,1}
ROOT broadcast = f32[4,4,3]{2,1,0} broadcast(constant), dimensions={1,2},
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[4,2,3]{2,1,0}"),
op::Broadcast(op::DynamicSlice(
op::Constant(), op::Reshape(), op::Constant()))));
}
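// A maximal-device outfeed becomes a conditional on the partition id: only
// the owning partition's branch performs the outfeed, the other branch
// produces a fresh after-all token.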
TEST_P(SpmdPartitioningTest, OutfeedSingleDevice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token.0 = token[] after-all()
data = f32[1024]{0} parameter(0), sharding={maximal device=0}
outfeed = token[] outfeed(data, token.0), sharding={maximal device=0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("token[]"),
op::Conditional(
op::Compare(op::PartitionId(), op::Constant()),
op::Tuple(op::Parameter(0), op::AfterAll()),
op::Tuple(op::Parameter(0), op::AfterAll()))));
HloInstruction* root_b0 = root->branch_computation(0)->root_instruction();
EXPECT_THAT(root_b0,
AllOf(op::Shape("token[]"),
op::Outfeed(op::GetTupleElement(op::Parameter(), 0),
op::GetTupleElement(op::Parameter(), 1))));
HloInstruction* root_b1 = root->branch_computation(1)->root_instruction();
EXPECT_THAT(root_b1, AllOf(op::Shape("token[]"), op::AfterAll()));
}
TEST_P(SpmdPartitioningTest, OutfeedEvenlyTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token.0 = token[] after-all()
data = f32[1024]{0} parameter(0), sharding={devices=[2]0,1}
ROOT outfeed = token[] outfeed(data, token.0), sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("token[]"),
op::Outfeed(op::Parameter(), op::AfterAll())));
}
TEST_P(SpmdPartitioningTest, OutfeedTupleEvenlyTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token.0 = token[] after-all()
data = (f32[1024,2]{1,0}, f32[2]{0}) parameter(0), sharding={{devices=[2,1]0,1},
{devices=[2]0,1}}
ROOT outfeed = token[] outfeed(data, token.0),
outfeed_shape=(f32[1024,2]{0,1}, f32[2]{0}), sharding={{devices=[2,1]0,1},
{devices=[2]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("token[]"),
op::Outfeed(op::Parameter(), op::AfterAll())));
auto expected_layout0 = LayoutUtil::MakeLayout({0, 1});
auto expected_layout1 = LayoutUtil::MakeLayout({0});
EXPECT_TRUE(LayoutUtil::Equal(root->outfeed_shape().tuple_shapes(0).layout(),
expected_layout0));
EXPECT_TRUE(LayoutUtil::Equal(root->outfeed_shape().tuple_shapes(1).layout(),
expected_layout1));
}
TEST_P(SpmdPartitioningTest, OutfeedReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token.0 = token[] after-all()
data = (f32[1024,2]{1,0}, f32[2]{0}) parameter(0), sharding={{devices=[2,1]0,1},
{replicated}}
ROOT outfeed = token[] outfeed(data, token.0), sharding={{devices=[2,1]0,1},
{replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("token[]"),
op::Outfeed(op::Parameter(), op::AfterAll())));
}
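// Unevenly tiled outfeed: as with uneven infeed, a conditional selects
// between two outfeed computations with different shard shapes, and both
// preserve the requested per-leaf outfeed layouts.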
TEST_P(SpmdPartitioningTest, OutfeedUnevenlyTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
token.0 = token[] after-all()
data = (f32[1023,2]{1,0}, f32[3]{0}) parameter(0), sharding={{devices=[2,1]0,1},
{devices=[2]0,1}}
outfeed = token[] outfeed(data, token.0),
outfeed_shape=(f32[1023,2]{0,1}, f32[3]{0}), sharding={{devices=[2,1]0,1},
{devices=[2]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("token[]"),
op::Conditional(op::Convert(),
op::Tuple(op::Parameter(), op::AfterAll()),
op::Tuple(op::Parameter(), op::AfterAll()))));
auto first_outfeed =
AllOf(op::Shape("(f32[512,2], f32[2])"), op::GetTupleElement());
EXPECT_THAT(root->called_computations()[0]->root_instruction(),
AllOf(op::Shape("token[]"),
op::Outfeed(first_outfeed, op::GetTupleElement())));
auto second_outfeed = AllOf(op::Shape("(f32[511,2], f32[1])"), op::Tuple());
EXPECT_THAT(root->called_computations()[1]->root_instruction(),
AllOf(op::Shape("token[]"),
op::Outfeed(second_outfeed, op::GetTupleElement())));
auto expected_layout0 = LayoutUtil::MakeLayout({0, 1});
auto expected_layout1 = LayoutUtil::MakeLayout({0});
auto first_outfeed_instr = root->called_computations()[0]->root_instruction();
auto second_outfeed_instr =
root->called_computations()[1]->root_instruction();
EXPECT_TRUE(LayoutUtil::Equal(
first_outfeed_instr->outfeed_shape().tuple_shapes(0).layout(),
expected_layout0));
EXPECT_TRUE(LayoutUtil::Equal(
first_outfeed_instr->outfeed_shape().tuple_shapes(1).layout(),
expected_layout1));
EXPECT_TRUE(LayoutUtil::Equal(
second_outfeed_instr->outfeed_shape().tuple_shapes(0).layout(),
expected_layout0));
EXPECT_TRUE(LayoutUtil::Equal(
second_outfeed_instr->outfeed_shape().tuple_shapes(1).layout(),
expected_layout1));
}
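// Reduce-window with a replicated input and tiled output: the input is padded
// and dynamic-sliced so each partition sees exactly the window region that
// produces its output shard.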
TEST_P(SpmdPartitioningTest, ReduceWindowReplicatedInput) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[6,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1},{1,2},{2,2}}),
sharding={replicated}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce-window = f32[3,2]{1,0} reduce-window(constant, constant.1),
window={size=3x1 stride=2x1 pad=1_0x0_0}, to_apply=sum,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Shape("f32[2,2]{1,0}"),
op::ReduceWindow(
op::DynamicSlice(AllOf(op::Shape("f32[9,2]{1,0}"),
op::Pad(op::Constant(), op::Constant())),
op::Multiply(op::Reshape(), op::Constant()),
op::Constant()),
op::Constant())));
}
TEST_P(SpmdPartitioningTest, ReduceWindowTiledNegativeLeftHalo) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[6,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1},{1,2},{2,2}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT %reduce-window = f32[3,2]{1,0} reduce-window(%constant, %constant.1),
window={size=3x1 stride=2x1 pad=0_1x0_0}, to_apply=sum,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto sharded_input =
op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
auto right_halo = AllOf(op::Shape("f32[2,2]{1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto pre_masking = op::DynamicSlice(
AllOf(
op::Shape("f32[6,2]{1,0}"),
op::Pad(op::Concatenate(sharded_input, right_halo), op::Constant())),
op::Reshape(), op::Constant());
auto index_in_padded = op::Add(
op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
auto masked =
op::Select(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
pre_masking, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::Shape("f32[2,2]{1,0}"),
op::ReduceWindow(masked, op::Constant())));
}
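// With 5 partitions and pad=3_0, the required halo can extend beyond the
// immediate neighbor, so it is assembled from two collective-permutes before
// masking.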
TEST_P(SpmdPartitioningTest, ReduceWindowTiledOneSideHaloBeyondNeighbor) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[9,2] parameter(0), sharding={devices=[5,1]0,1,2,3,4}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce-window = f32[5,2]{1,0} reduce-window(param, constant.1),
window={size=4x1 stride=2x1 pad=3_0x0_0}, to_apply=sum,
sharding={devices=[5,1]0,1,2,3,4}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 5));
VLOG(1) << module->ToString();
auto halo0 = AllOf(op::Shape("f32[1,2]"),
op::CollectivePermute(op::Slice(op::Parameter(0))));
auto halo1 =
AllOf(op::Shape("f32[2,2]"), op::CollectivePermute(op::Parameter(0)));
auto pre_mask =
AllOf(op::Shape("f32[4,2]"),
op::Concatenate(halo0, halo1, op::Slice(op::Parameter(0))));
auto masked =
op::Select(op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply())),
op::Broadcast(op::Constant())),
pre_mask, op::Broadcast(op::Constant()));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[1,2]{1,0}"),
op::ReduceWindow(masked, op::Constant())));
}
TEST_P(SpmdPartitioningTest, ReduceWindowTiledOneSideUnequalHalo) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[9,2]{1,0} constant(
{{1,1},{1,4},{2,1},{3,1},{1,2},{2,2},{4,1},{1,2},{2,1}}),
sharding={devices=[3,1]0,1,2}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce-window = f32[5,2]{1,0} reduce-window(constant, constant.1),
window={size=3x1 stride=2x1 pad=1_1x0_0}, to_apply=sum,
sharding={devices=[3,1]0,1,2}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 3));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto sharded_input =
op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
auto right_halo = AllOf(op::Shape("f32[2,2]{1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto pre_masking = op::DynamicSlice(
AllOf(
op::Shape("f32[7,2]{1,0}"),
op::Pad(op::Concatenate(sharded_input, right_halo), op::Constant())),
op::Reshape(), op::Constant());
auto index_in_padded = op::Add(
op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
auto masked = op::Select(
op::And(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
op::Compare(index_in_padded, op::Broadcast(op::Constant()))),
pre_masking, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::Shape("f32[2,2]{1,0}"),
op::ReduceWindow(masked, op::Constant())));
}
TEST_P(SpmdPartitioningTest, ReduceWindowTiledTwoSideHalo) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[4,2]{1,0} constant({{1,1},{1,4},{2,1},{3,1}}),
sharding={devices=[2,1]0,1}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT reduce-window = f32[2,2]{1,0} reduce-window(constant, constant.1),
window={size=5x1 stride=3x1 pad=2_2x0_0}, to_apply=sum,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto sharded_input =
op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant());
auto left_halo = AllOf(op::Shape("f32[1,2]{1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto right_halo = AllOf(op::Shape("f32[1,2]{1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto pre_masking = AllOf(
op::Shape("f32[5,2]{1,0}"),
op::DynamicSlice(
AllOf(op::Shape("f32[6,2]{1,0}"),
op::Pad(op::Concatenate(left_halo, sharded_input, right_halo),
op::Constant())),
op::Reshape(), op::Constant()));
auto index_in_padded = op::Add(
op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
auto masked = op::Select(
op::And(op::Compare(index_in_padded, op::Broadcast(op::Constant())),
op::Compare(index_in_padded, op::Broadcast(op::Constant()))),
pre_masking, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::Shape("f32[1,2]{1,0}"),
op::ReduceWindow(masked, op::Constant())));
}
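// 2D-tiled reduce-window: halo exchange and masking are applied independently
// for each tiled dimension before the local reduce-window.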
TEST_P(SpmdPartitioningTest, ReduceWindowTiled2D) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
token0 = token[] after-all(), sharding={maximal device=0}
infeed = (f32[4,4,2,2]{3,2,1,0}, token[]) infeed(token0),
sharding={{devices=[2,2,1,1]<=[4]}, {maximal device=0}}
infeed.data = f32[4,4,2,2]{3,2,1,0} get-tuple-element(infeed), index=0,
sharding={devices=[2,2,1,1]<=[4]}
constant = f32[] constant(0), sharding={replicated}
ROOT reduce-window = f32[2,2,2,2]{3,2,1,0} reduce-window(infeed.data, constant),
window={size=5x5x1x1 stride=3x3x1x1 pad=2_2x2_2x0_0x0_0}, to_apply=sum,
sharding={devices=[2,2,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto sharded_input = AllOf(op::Shape("f32[2,2,2,2]{3,2,1,0}"),
op::GetTupleElement(op::Infeed()));
auto dim0_left_halo = AllOf(op::Shape("f32[1,2,2,2]{3,2,1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto dim0_right_halo = AllOf(op::Shape("f32[1,2,2,2]{3,2,1,0}"),
op::CollectivePermute(op::Slice(sharded_input)));
auto dim0_pre_masking = op::DynamicSlice(
AllOf(op::Shape("f32[6,2,2,2]{3,2,1,0}"),
op::Pad(
op::Concatenate(dim0_left_halo, sharded_input, dim0_right_halo),
op::Constant())),
op::Reshape(), op::Constant(), op::Constant(), op::Constant());
auto dim0_index_in_padded = op::Add(
op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
auto dim0_masked = op::Select(
op::And(op::Compare(dim0_index_in_padded, op::Broadcast(op::Constant())),
op::Compare(dim0_index_in_padded, op::Broadcast(op::Constant()))),
dim0_pre_masking, op::Broadcast(op::Constant()));
auto dim0_resharded = AllOf(op::Shape("f32[5,2,2,2]{3,2,1,0}"), dim0_masked);
auto dim1_left_halo = AllOf(op::Shape("f32[5,1,2,2]{3,2,1,0}"),
op::CollectivePermute(op::Slice(dim0_resharded)));
auto dim1_right_halo =
AllOf(op::Shape("f32[5,1,2,2]{3,2,1,0}"),
op::CollectivePermute(op::Slice(dim0_resharded)));
auto dim1_pre_masking = op::DynamicSlice(
AllOf(op::Shape("f32[5,6,2,2]{3,2,1,0}"),
op::Pad(op::Concatenate(dim1_left_halo, dim0_resharded,
dim1_right_halo),
op::Constant())),
op::Constant(), op::Reshape(), op::Constant(), op::Constant());
auto dim1_index_in_padded = op::Add(
op::Iota(), op::Broadcast(op::Multiply(op::Reshape(), op::Constant())));
auto dim1_masked = op::Select(
op::And(op::Compare(dim1_index_in_padded, op::Broadcast(op::Constant())),
op::Compare(dim1_index_in_padded, op::Broadcast(op::Constant()))),
dim1_pre_masking, op::Broadcast(op::Constant()));
auto dim1_resharded = AllOf(op::Shape("f32[5,5,2,2]{3,2,1,0}"), dim1_masked);
EXPECT_THAT(root, AllOf(op::Shape("f32[1,1,2,2]{3,2,1,0}"),
op::ReduceWindow(dim1_resharded, op::Constant())));
}
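// Convolution with a spatially tiled LHS and replicated RHS: left/right halos
// on the tiled spatial dimension are exchanged via collective-permute and the
// concatenated input is masked before the local convolution.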
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(f32[128,224,224,3] %lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[7,7,3,64] parameter(1)
%rhs.copy = f32[7,7,3,64] copy(f32[7,7,3,64] %rhs),
sharding={replicated}
ROOT %conv = f32[128,112,112,64] convolution(
f32[128,224,224,3] %lhs.copy,
f32[7,7,3,64] %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,112,224,3]"));
const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,3,224,3]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,2,224,3]"));
EXPECT_THAT(root,
AllOf(op::Convolution(
op::Select(op::And(),
op::Concatenate(left_halo, lhs, right_halo),
op::Broadcast()),
rhs),
op::Shape("f32[128,56,112,64]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicatedNeedReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(f32[128,224,224,3] %lhs),
sharding={devices=[2,1,1,1]0,1}
%rhs = f32[7,7,3,64] parameter(1)
%rhs.copy = f32[7,7,3,64] copy(f32[7,7,3,64] %rhs),
sharding={replicated}
ROOT %conv = f32[128,112,112,64] convolution(
f32[128,224,224,3] %lhs.copy,
f32[7,7,3,64] %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[64,224,224,3]"));
auto all_to_all =
AllOf(op::AllToAll(op::Reshape(lhs)), op::Shape("f32[64,2,112,224,3]"));
auto reshard_lhs = AllOf(op::Reshape(op::Transpose(all_to_all)),
op::Shape("f32[128,112,224,3]"));
const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(reshard_lhs)),
op::Shape("f32[128,3,224,3]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(reshard_lhs)),
op::Shape("f32[128,2,224,3]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(op::And(),
op::Concatenate(left_halo, reshard_lhs, right_halo),
op::Broadcast()),
rhs),
op::Shape("f32[128,56,112,64]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsReplicatedReordered) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[224,224,3,128] parameter(0)
%lhs.copy = f32[224,224,3,128] copy(%lhs), sharding={devices=[2,1,1,1]0,1}
%rhs = f32[7,7,3,64] parameter(1)
%rhs.copy = f32[7,7,3,64] copy(%rhs), sharding={replicated}
ROOT %conv = f32[128,112,112,64] convolution(%lhs.copy, %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=01fb_01io->b01f,
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[112,224,3,128]"));
const auto rhs = AllOf(op::Copy(op::Parameter()), op::Shape("f32[7,7,3,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[3,224,3,128]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[2,224,3,128]"));
EXPECT_THAT(root,
AllOf(op::Convolution(
op::Select(op::And(),
op::Concatenate(left_halo, lhs, right_halo),
op::Broadcast()),
rhs),
op::Shape("f32[128,56,112,64]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionBaseDilationSameStartPatternLhsTiledRhsReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,7,7,512] parameter(0)
%lhs.copy = f32[128,7,7,512] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[3,3,512,512] parameter(1)
%rhs.copy = f32[3,3,512,512] copy(%rhs),
sharding={replicated}
ROOT %conv = f32[128,4,4,512] convolution(%lhs.copy, %rhs.copy),
window={size=3x3 stride=4x4 pad=1_1x1_1 lhs_dilate=2x2 rhs_reversal=1x1},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto sliced_lhs =
AllOf(op::Slice(op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant()))),
op::Shape("f32[128,3,7,512]"));
const auto rhs =
AllOf(op::Copy(op::Parameter()), op::Shape("f32[3,3,512,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(sliced_lhs, rhs),
op::Shape("f32[128,2,4,512]")));
EXPECT_EQ(root->window().dimensions(0).padding_low(), 1);
EXPECT_EQ(root->window().dimensions(0).padding_high(), 1);
}
TEST_P(SpmdPartitioningTest,
ConvolutionBaseDilationStride1LhsTiledRhsReplicated) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,7,7,512] parameter(0)
%lhs.copy = f32[128,7,7,512] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[3,3,512,512] parameter(1)
%rhs.copy = f32[3,3,512,512] copy(%rhs),
sharding={replicated}
ROOT %conv = f32[128,14,14,512] convolution(%lhs.copy, %rhs.copy),
window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,4,7,512]"));
const auto rhs =
AllOf(op::Copy(op::Parameter()), op::Shape("f32[3,3,512,512]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,1,7,512]"));
auto start_window = op::Multiply(op::Reshape(), op::Constant());
auto start_input_element = op::Divide(start_window, op::Constant());
auto dynamic_offset_for_padded_concat = op::Subtract(
op::Constant(), op::Subtract(op::Multiply(op::Reshape(), op::Constant()),
start_input_element));
auto pre_masking =
AllOf(op::Shape("f32[128,5,7,512]"),
op::DynamicSlice(
AllOf(op::Shape("f32[128,6,7,512]"),
op::Pad(op::Concatenate(left_halo, lhs), op::Constant())),
op::Constant(), dynamic_offset_for_padded_concat,
op::Constant(), op::Constant()));
auto masked = op::Select(
op::Compare(op::Add(op::Iota(), op::Broadcast(start_input_element)),
op::Broadcast(op::Constant())),
pre_masking, op::Broadcast(op::Constant()));
auto dynamic_offset_on_output = op::Subtract(
start_window, op::Multiply(start_input_element, op::Constant()));
EXPECT_THAT(root,
AllOf(op::DynamicSlice(AllOf(op::Convolution(masked, rhs),
op::Shape("f32[128,8,14,512]")),
op::Constant(), dynamic_offset_on_output,
op::Constant(), op::Constant()),
op::Shape("f32[128,7,14,512]")));
EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_low(), 1);
EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_high(), 0);
}
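// Select-and-scatter whose windows do not straddle shard boundaries: each
// partition runs on its masked local data and source shards with no halo
// exchange and no extra window padding.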
TEST_P(SpmdPartitioningTest, SelectAndScatterNoOverlap) {
absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param), sharding={devices=[4,1]<=[4]}
constant = f32[4,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=3x2 pad=0_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto source =
AllOf(op::Shape("f32[1,2]{1,0}"),
op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant()));
auto masked_data = AllOf(
op::Shape("f32[3,4]{1,0}"),
op::Select(
op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply(
op::Reshape(), op::Constant()))),
op::Broadcast(op::Constant())),
op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Reshape(), op::Constant())),
op::Broadcast(op::Constant())));
EXPECT_THAT(root,
AllOf(op::SelectAndScatter(masked_data, source, op::Constant()),
op::Shape("f32[3,4]{1,0}")));
EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
TEST_P(SpmdPartitioningTest, SelectAndScatterNoOverlapReshard) {
absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param),
sharding={devices=[1,4]<=[4]}
constant = f32[4,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=3x2 pad=0_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto source =
AllOf(op::Shape("f32[1,2]{1,0}"),
op::DynamicSlice(op::Constant(), op::Reshape(), op::Constant()));
auto operand = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(0), op::Constant(), op::Reshape())),
op::Shape("f32[11,1]"));
auto reshard_operand = op::Reshape(op::Transpose(
op::AllToAll(op::Reshape(op::Pad(operand, op::Constant())))));
auto masked_data = AllOf(
op::Shape("f32[3,4]{1,0}"),
op::Select(
op::Compare(op::Add(op::Iota(), op::Broadcast(op::Multiply(
op::Reshape(), op::Constant()))),
op::Broadcast(op::Constant())),
reshard_operand, op::Broadcast(op::Constant())));
EXPECT_THAT(root,
AllOf(op::SelectAndScatter(masked_data, source, op::Constant()),
op::Shape("f32[3,4]{1,0}")));
EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
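// Overlapping select-and-scatter windows require halo exchange on both the
// data and the source, and the result is dynamic-sliced to drop the halo
// region again.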
TEST_P(SpmdPartitioningTest, SelectAndScatterWithOverlap) {
absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param = f32[11,4]{1,0} parameter(0)
%param.copy = f32[11,4] copy(%param),
sharding={devices=[4,1]<=[4]}
constant = f32[6,2]{1,0} constant({{1,2},{3,4},{1,0},{2,8},{6,6},{1,9}}),
sharding={devices=[4,1]<=[4]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[11,4]{1,0} select-and-scatter(param.copy,
constant, constant.1), window={size=3x2 stride=2x2 pad=1_1x0_0},
select=ge, scatter=sum, sharding={devices=[4,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto source_shard =
AllOf(op::Shape("f32[2,2]{1,0}"),
op::DynamicSlice(op::Pad(), op::Reshape(), op::Constant()));
auto source_left_halo = op::CollectivePermute(source_shard);
auto required_source_shard_start =
op::Divide(op::Multiply(op::Reshape(), op::Constant()), op::Constant());
auto source_with_halo = op::DynamicSlice(
AllOf(op::Shape("f32[5,2]{1,0}"),
op::Pad(op::Concatenate(source_left_halo, source_shard),
op::Constant())),
op::Subtract(op::Constant(),
op::Subtract(op::Multiply(op::Reshape(), op::Constant()),
required_source_shard_start)),
op::Constant());
auto masked_source_with_halo = AllOf(
AllOf(op::Shape("f32[3,2]{1,0}")),
op::Select(
op::Compare(
op::Add(op::Iota(), op::Broadcast(required_source_shard_start)),
op::Broadcast(op::Constant())),
source_with_halo, op::Broadcast(op::Constant())));
auto data_shard =
AllOf(op::Shape("f32[3,4]{1,0}"),
op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Reshape(), op::Constant())));
auto data_left_halo = AllOf(op::Shape("f32[2,4]{1,0}"),
op::CollectivePermute(op::Slice(data_shard)));
auto data_right_halo = AllOf(op::Shape("f32[2,4]{1,0}"),
op::CollectivePermute(op::Slice(data_shard)));
auto required_data_start_on_padded =
op::Multiply(required_source_shard_start, op::Constant());
auto left_halo_size = op::Subtract(
op::Add(op::Multiply(op::Reshape(), op::Constant()), op::Constant()),
required_data_start_on_padded);
auto data_with_halo =
AllOf(op::Shape("f32[7,4]{1,0}"),
op::DynamicSlice(
AllOf(op::Shape("f32[8,4]{1,0}"),
op::Pad(op::Concatenate(data_left_halo, data_shard,
data_right_halo),
op::Constant())),
op::Subtract(op::Constant(), left_halo_size), op::Constant()));
auto index_on_padded =
op::Add(op::Iota(), op::Broadcast(required_data_start_on_padded));
auto masked_data_with_halo = op::Select(
op::And(op::Compare(index_on_padded, op::Broadcast(op::Constant())),
op::Compare(index_on_padded, op::Broadcast(op::Constant()))),
data_with_halo, op::Broadcast(op::Constant()));
EXPECT_THAT(
root, AllOf(op::DynamicSlice(op::SelectAndScatter(masked_data_with_halo,
masked_source_with_halo,
op::Constant()),
left_halo_size, op::Constant()),
op::Shape("f32[3,4]{1,0}")));
EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_low(), 0);
EXPECT_EQ(root->operand(0)->window().dimensions(0).padding_high(), 0);
}
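// Both operands are tiled on a spatial dimension that is fully reduced by the
// 56x56 window, so the local convolutions produce partial results that are
// combined with an all-reduce.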
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,64]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,256]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(lhs, rhs)),
op::Shape("f32[1,1,64,256]")));
}
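// When the input shardings use the V2 (iota tile assignment) format, the
// all-reduce created by partitioning should carry an IotaReplicaGroupList
// rather than an explicit replica-group list.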
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiledWhenV2ShardingGeneratesReplicaGroupV2) {
if (GetParam() != ShardingFormatPicker::ShardingType::kBestEffortV2) {
GTEST_SKIP() << "This test only runs when input sharding is in V2 format.";
}
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,8,1,1]<=[8]}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[1,8,1,1]<=[8]}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto all_reduce_instruction =
std::find_if(module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
EXPECT_NE(all_reduce_instruction,
module->entry_computation()->instructions().end());
EXPECT_TRUE((*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.has_value());
IotaReplicaGroupList list = (*all_reduce_instruction)
->device_list()
.iota_replica_group_list()
.value();
EXPECT_EQ(list.num_replica_groups(), 1);
EXPECT_EQ(list.num_devices_per_group(), 8);
EXPECT_THAT(list.reshape_dims(), ::testing::ElementsAre(8));
EXPECT_THAT(list.transpose_perm(), ::testing::ElementsAre(0));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowReversal) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[5,128,64] parameter(0), sharding={devices=[2,1,1]0,1}
%rhs = f32[5,128,256] parameter(1), sharding={devices=[2,1,1]1,0}
ROOT %conv = f32[1,64,256] convolution(%lhs, %rhs),
window={size=5 rhs_reversal=1}, dim_labels=0fb_0io->0bf,
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto lhs_masked =
AllOf(op::Shape("f32[3,128,64]"), op::Select(_, op::Parameter(0), _));
const auto rhs_left_padded =
op::Concatenate(op::CollectivePermute(op::Slice(op::Parameter(1))),
op::Slice(op::Parameter(1)));
const auto rhs_masked =
AllOf(op::Shape("f32[3,128,256]"), op::Select(_, rhs_left_padded, _));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(lhs_masked, rhs_masked)),
op::Shape("f32[1,64,256]")));
}
TEST_P(SpmdPartitioningTest, DotLhsTiledRhsTiledWithReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,64] parameter(0)
%lhs.copy = f32[128,56,56,64] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,56,56,256] parameter(1)
%rhs.copy = f32[128,56,56,256] copy(%rhs), sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[1,1,64,256] convolution(%lhs.copy, %rhs.copy),
window={size=56x56}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,64]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[64,56,56,256]"));
auto all_to_all =
AllOf(op::AllToAll(op::Reshape(lhs)), op::Shape("f32[2,64,28,56,64]"));
auto reshard = AllOf(op::Reshape(op::Transpose(all_to_all)));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(reshard, rhs)),
op::Shape("f32[1,1,64,256]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,512] parameter(0)
%lhs.copy = f32[128,56,56,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,64] parameter(1)
%rhs.copy = f32[128,28,28,64] copy(%rhs), sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[1,1,512,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2},
dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[64,28,28,64]"));
auto all_to_all =
AllOf(op::AllToAll(op::Reshape(rhs)), op::Shape("f32[64,2,14,28,64]"));
auto reshard = op::Reshape(op::Transpose(all_to_all));
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(op::Slice(lhs), reshard)),
op::Shape("f32[1,1,512,64]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiled_UnevenDilatedRHSPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[8,28,28,8] parameter(0)
%lhs.copy = f32[8,28,28,8] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[8,14,14,64] parameter(1)
%rhs.copy = f32[8,14,14,64] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
ROOT %conv = f32[1,1,8,64] convolution(%lhs.copy, %rhs.copy),
window={size=14x14 pad=0_-1x0_-1 rhs_dilate=2x2},
dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[8,7,28,8]"));
const auto rhs = AllOf(op::Pad(op::Parameter(), op::Constant()),
op::Shape("f32[8,16,14,64]"));
auto selected_rhs = AllOf(
op::Select(op::Compare(),
op::Copy(op::DynamicSlice(rhs, op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Broadcast()),
op::Shape("f32[8,4,14,64]"));
auto right_halo =
AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,2,28,8]"));
auto selected_lhs =
AllOf(op::DynamicSlice(
op::Pad(op::Concatenate(lhs, right_halo), op::Constant()),
op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
op::Shape("f32[8,7,28,8]"));
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(selected_lhs, selected_rhs)),
op::Shape("f32[1,1,8,64]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,28,28,128] parameter(0)
%lhs.copy = f32[32,28,28,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,28,28,64] parameter(1)
%rhs.copy = f32[32,28,28,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,128,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=1_1x1_1}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                        PartitionComputation(hlo_string, 2, false));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,14,28,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,14,28,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
op::Shape("f32[32,1,28,64]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
op::Shape("f32[32,1,28,64]"));
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(
lhs, AllOf(op::Concatenate(left_halo, rhs, right_halo),
op::Shape("f32[32,16,28,64]")))),
op::Shape("f32[3,3,128,64]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowDilate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,112,112,64] parameter(1)
%rhs.copy = f32[128,112,112,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[7,7,3,64] convolution(%lhs.copy, %rhs.copy),
window={size=112x112 pad=3_2x3_2 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                        PartitionComputation(hlo_string, 2, false));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,112,224,3]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,56,112,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
op::Shape("f32[128,2,112,64]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
op::Shape("f32[128,2,112,64]"));
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(
lhs, AllOf(op::Concatenate(left_halo, rhs, right_halo),
op::Shape("f32[128,60,112,64]")))),
op::Shape("f32[7,7,3,64]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiledWindowDilateNegativeRhsPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,256] parameter(0)
%lhs.copy = f32[128,56,56,256] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,512] parameter(1)
%rhs.copy = f32[128,28,28,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                        PartitionComputation(hlo_string, 2, false));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,256]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,14,28,512]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(lhs, rhs)),
op::Shape("f32[1,1,256,512]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWindowDilateUneven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,14,14,512] parameter(0)
%lhs.copy = f32[128,14,14,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,7,7,512] parameter(1)
%rhs.copy = f32[128,7,7,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,512,512] convolution(%lhs.copy, %rhs.copy),
window={size=7x7 pad=1_0x1_0 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
                        PartitionComputation(hlo_string, 2, false));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,7,14,512]"));
const auto rhs = AllOf(
op::Select(op::Compare(),
op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant())),
op::Broadcast()),
op::Shape("f32[128,4,7,512]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(rhs)),
op::Shape("f32[128,1,7,512]"));
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Convolution(
AllOf(op::DynamicSlice(op::Pad(lhs, op::Constant()),
op::Constant(), op::Subtract(),
op::Constant(), op::Constant()),
op::Shape("f32[128,10,14,512]")),
AllOf(op::Concatenate(left_halo, rhs),
op::Shape("f32[128,5,7,512]")))),
op::Shape("f32[3,3,512,512]")));
}
TEST_P(SpmdPartitioningTest, ConvolutionLhsTiledRhsTiledWithPadding_HaloOnLhs) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,28,28,128] parameter(0)
%lhs.copy = f32[32,28,28,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,28,28,64] parameter(1)
%rhs.copy = f32[32,28,28,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,128,64] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=1_1x1_1}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,14,28,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,14,28,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[32,1,28,128]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[32,1,28,128]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
AllOf(op::Concatenate(left_halo, lhs, right_halo),
op::Shape("f32[32,16,28,128]")),
rhs)),
op::Shape("f32[3,3,128,64]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiledWindowDilate_HaloOnLhs) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,224,224,3] parameter(0)
%lhs.copy = f32[128,224,224,3] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,112,112,64] parameter(1)
%rhs.copy = f32[128,112,112,64] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[7,7,3,64] convolution(%lhs.copy, %rhs.copy),
window={size=112x112 pad=3_2x3_2 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,112,224,3]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,56,112,64]"));
auto left_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,3,224,3]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,2,224,3]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
AllOf(op::Concatenate(left_halo, lhs, right_halo),
op::Shape("f32[128,117,224,3]")),
rhs)),
op::Shape("f32[7,7,3,64]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiledWindowDilateNegativeRhsPadding_HaloOnLhs) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,256] parameter(0)
%lhs.copy = f32[128,56,56,256] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,28,28,512] parameter(1)
%rhs.copy = f32[128,28,28,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,28,56,256]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,14,28,512]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(op::Slice(lhs), rhs)),
op::Shape("f32[1,1,256,512]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionLhsTiledRhsTiledWindowDilateUneven_HaloOnLhs) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,14,14,512] parameter(0)
%lhs.copy = f32[128,14,14,512] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[128,7,7,512] parameter(1)
%rhs.copy = f32[128,7,7,512] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[3,3,512,512] convolution(%lhs.copy, %rhs.copy),
window={size=7x7 pad=1_0x1_0 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[128,7,14,512]"));
const auto rhs = AllOf(
op::Select(op::Compare(),
op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant())),
op::Broadcast()),
op::Shape("f32[128,4,7,512]"));
auto right_halo = AllOf(op::CollectivePermute(op::Slice(lhs)),
op::Shape("f32[128,1,14,512]"));
EXPECT_THAT(
root, AllOf(op::AllReduce(op::Convolution(
AllOf(op::DynamicSlice(
AllOf(op::Pad(op::Concatenate(lhs, right_halo),
op::Constant()),
op::Shape("f32[128,10,14,512]")),
op::Constant(), op::Reshape(), op::Constant(),
op::Constant()),
op::Shape("f32[128,9,14,512]")),
rhs)),
op::Shape("f32[3,3,512,512]")));
}
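// Concatenation along an unpartitioned dimension stays a local concatenate of
// the per-partition shards.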
TEST_P(SpmdPartitioningTest, ConcatenateAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0)
%param0.copy = f32[14,257] copy(%param0), sharding={devices=[2,1]0,1}
%param1 = f32[14,116] parameter(1)
%param1.copy = f32[14,116] copy(%param1), sharding={devices=[2,1]0,1}
ROOT %concatenate = f32[14,373] concatenate(%param0.copy, %param1.copy),
dimensions={1}, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant())),
op::Shape("f32[7,257]"));
auto param1 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant())),
op::Shape("f32[7,116]"));
EXPECT_THAT(root,
AllOf(op::Concatenate(param0, param1), op::Shape("f32[7,373]")));
}
TEST_P(SpmdPartitioningTest, ConcatenateAlongPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0)
%param0.copy = f32[14,257] copy(%param0), sharding={devices=[1,2]0,1}
%param1 = f32[14,116] parameter(1)
%param1.copy = f32[14,116] copy(%param1), sharding={devices=[1,2]0,1}
ROOT %concatenate = f32[14,373] concatenate(%param0.copy, %param1.copy),
dimensions={1}, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape())),
op::Shape("f32[14,129]"));
auto param0_adjusted =
AllOf(op::Select(op::Compare(op::Add(), op::Broadcast(op::Constant())),
param0, op::Broadcast(op::Constant())),
op::Shape("f32[14,129]"));
auto param1 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(),
op::Reshape())),
op::Shape("f32[14,58]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(
AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::DynamicUpdateSlice(
op::Broadcast(), param0_adjusted,
op::Constant(), op::Multiply()),
param1, op::Constant(), op::Add())),
op::Shape("f32[14,374]")),
op::Constant(), op::Multiply()),
op::Shape("f32[14,187]")));
}
TEST_P(SpmdPartitioningTest, ConcatenateAlongBothDimensions) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0), sharding={devices=[2,2]<=[4]}
%param1 = f32[14,116] parameter(1), sharding={devices=[2,2]<=[4]}
ROOT %concatenate = f32[14,373] concatenate(%param0, %param1),
dimensions={1}, sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[7,129]"));
auto param0_adjusted =
AllOf(op::Select(op::Compare(op::Add(), op::Broadcast(op::Constant())),
param0, op::Broadcast(op::Constant())),
op::Shape("f32[7,129]"));
auto param1 = AllOf(op::Parameter(1), op::Shape("f32[7,58]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(
AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::DynamicUpdateSlice(
op::Broadcast(), param0_adjusted,
op::Constant(), op::Multiply()),
param1, op::Constant(), op::Add())),
op::Shape("f32[7,374]")),
op::Constant(), op::Multiply()),
op::Shape("f32[7,187]")));
}
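// Pad partitioning: padding a non-partitioned dimension is done locally,
// while padding a partitioned dimension requires a halo exchange
// (collective-permute of boundary slices) and masking of the padded region.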
TEST_P(SpmdPartitioningTest, PadAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[128,17,257] pad(%param0, %const), padding=0_0x1_2x0_0,
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"));
EXPECT_THAT(root, AllOf(op::Pad(param0, op::Constant()),
op::Shape("f32[128,17,129]")));
}
TEST_P(SpmdPartitioningTest, PadAlongNonPartitionedDimensionReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={replicated}
%const = f32[] constant(0)
ROOT %pad = f32[128,17,257] pad(%param0, %const), padding=0_0x1_2x0_0,
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[128,14,257]"));
auto operand = op::DynamicSlice(op::Pad(param0, _), op::Constant(),
op::Constant(), op::Multiply());
EXPECT_THAT(root, AllOf(op::Pad(operand, op::Constant()),
op::Shape("f32[128,17,129]")));
}
TEST_P(SpmdPartitioningTest, PadAlongPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[14,257] parameter(0), sharding={devices=[1,2]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[14,259] pad(%param0, %const), padding=0_0x0_2,
sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[14,129]"));
auto after_halo_exchange =
AllOf(op::Shape("f32[14,130]"),
op::Concatenate(param0, op::CollectivePermute(op::Slice(param0))));
auto pad = AllOf(op::Shape("f32[14,131]"),
op::Pad(after_halo_exchange, op::Constant()));
EXPECT_THAT(root, op::Select(_, op::DynamicSlice(pad, op::Constant(), _), _));
}
TEST_P(SpmdPartitioningTest, PadAlongPartitionedDimensionWithInteriorPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[7] parameter(0), sharding={devices=[2]0,1}
%param1 = f32[] parameter(1), sharding={replicated}
ROOT %pad = f32[22] pad(%param0, %param1), padding=2_1_2,
sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[4]"));
auto after_halo_exchange = AllOf(
op::Shape("f32[4]"),
op::DynamicSlice(
AllOf(op::Shape("f32[5]"),
op::Pad(AllOf(op::Shape("f32[4]"),
op::Concatenate(
op::CollectivePermute(op::Slice(param0)),
op::Slice(param0))),
op::Parameter(1))),
_));
auto pad = op::Pad(after_halo_exchange, op::Parameter(1));
EXPECT_THAT(root, op::DynamicSlice(pad, _));
}
TEST_P(SpmdPartitioningTest, PartialReplicatePad) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[11,7] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%param1 = f32[] parameter(1), sharding={replicated}
ROOT %pad = f32[27,22] pad(%param0, %param1), padding=2_4_1x2_1_2,
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[11,4]"));
auto after_halo_exchange = AllOf(
op::Shape("f32[11,4]"),
op::DynamicSlice(
AllOf(op::Shape("f32[11,5]"),
op::Pad(AllOf(op::Shape("f32[11,4]"),
op::Concatenate(
op::CollectivePermute(op::Slice(param0)),
op::Slice(param0))),
op::Parameter(1))),
op::Constant(), _));
auto pad = op::Pad(after_halo_exchange, op::Parameter(1));
EXPECT_THAT(root, AllOf(op::DynamicSlice(pad, op::Constant(), _),
op::Shape("f32[27,11]")));
}
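// Slice partitioning: slicing a non-partitioned dimension stays local;
// slicing a partitioned dimension triggers a halo exchange so each shard
// can produce its portion of the offset/strided slice.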
TEST_P(SpmdPartitioningTest, SliceAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0)
%param0.copy = f32[128,14,257] copy(%param0), sharding={devices=[1,1,2]0,1}
ROOT %slice = f32[128,11,257] slice(%param0.copy),
slice={[0:128:1], [2:13:1], [0:257:1]}, sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Constant(), op::Reshape())),
op::Shape("f32[128,14,129]"));
EXPECT_THAT(root, AllOf(op::Slice(param0), op::Shape("f32[128,11,129]")));
}
TEST_P(SpmdPartitioningTest, SliceAlongPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2]0,1}
ROOT %slice = f32[63,14,251] slice(%param0),
slice={[2:128:2], [0:14:1], [5:256:1]}, sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[128,14,129]"));
EXPECT_THAT(
root,
AllOf(op::Slice(AllOf(
op::DynamicSlice(
AllOf(op::Concatenate(
op::Slice(param0),
AllOf(op::CollectivePermute(op::Slice(param0)),
op::Shape("f32[128,14,2]"))),
op::Shape("f32[128,14,129]")),
op::Constant(), op::Constant(), op::Add()),
op::Shape("f32[128,14,126]"))),
op::Shape("f32[63,14,126]")));
}
TEST_P(SpmdPartitioningTest, SliceAlongPartitionedDimension2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
ROOT %slice = f32[1] slice(%param0),
slice={[3:4]}, sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1]"));
EXPECT_THAT(root, AllOf(op::Copy(op::CollectivePermute(param0)),
op::Shape("f32[1]")));
}
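// A pad followed by a slice that together shift the data right: lowered to a
// collective-permute from the neighboring shard, with a select to mask in the
// pad value when it is non-zero (see the NoMasking variant below).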
TEST_P(SpmdPartitioningTest, MergedPadThenSliceShiftRight) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
%init = f32[] constant(2.0)
%pad = f32[5] pad(%param0, %init), padding=1_0, sharding={devices=[4]<=[4]}
%copy = f32[5] copy(%pad), sharding={devices=[4]<=[4]}
%copy.1 = f32[5] copy(%copy), sharding={devices=[4]<=[4]}
ROOT %slice = f32[4] slice(%copy.1), slice={[0:4]}, sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1]"));
EXPECT_THAT(root, AllOf(op::Select(_, op::CollectivePermute(param0), _),
op::Shape("f32[1]")));
}
TEST_P(SpmdPartitioningTest, MergedPadThenSliceShiftRightNoMasking) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[4] parameter(0), sharding={devices=[4]<=[4]}
%init = f32[] constant(0)
%pad = f32[5] pad(%param0, %init), padding=1_0, sharding={devices=[4]<=[4]}
%copy = f32[5] copy(%pad), sharding={devices=[4]<=[4]}
%copy.1 = f32[5] copy(%copy), sharding={devices=[4]<=[4]}
ROOT %slice = f32[4] slice(%copy.1), slice={[0:4]}, sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1]"));
EXPECT_THAT(root, AllOf(op::CollectivePermute(param0), op::Shape("f32[1]")));
}
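// Slice/concatenate pairs that rotate the data along the partitioned
// dimension: lowered to collective-permutes of the local shards; with
// unaligned shard boundaries a select combines the two rotation patterns.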
TEST_P(SpmdPartitioningTest, MergedSliceThenConcatRotateRight) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[12] parameter(0), sharding={devices=[4]<=[4]}
%slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]}
%slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]}
ROOT %concat = f32[12] concatenate(%slice0, %slice1), dimensions={0},
sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[3]"));
auto rotate = op::Concatenate(op::CollectivePermute(op::Slice(param0)),
op::Slice(param0));
EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]")));
}
TEST_P(SpmdPartitioningTest,
MergedSliceThenConcatRotateRightWithAlignedPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[6] parameter(0), sharding={devices=[4]<=[4]}
%slice0 = f32[2] slice(%param0), slice={[4:6]}, sharding={devices=[4]<=[4]}
%slice1 = f32[4] slice(%param0), slice={[0:4]}, sharding={devices=[4]<=[4]}
ROOT %concat = f32[6] concatenate(%slice0, %slice1), dimensions={0},
sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[2]"));
EXPECT_THAT(root, op::CollectivePermute(param0));
}
TEST_P(SpmdPartitioningTest,
MergedSliceThenConcatRotateRightWithUnalignedPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[10] parameter(0), sharding={devices=[4]<=[4]}
%slice0 = f32[6] slice(%param0), slice={[4:10]}, sharding={devices=[4]<=[4]}
%slice1 = f32[4] slice(%param0), slice={[0:4]}, sharding={devices=[4]<=[4]}
ROOT %concat = f32[10] concatenate(%slice0, %slice1), dimensions={0},
sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[3]"));
auto rotate0 = op::CollectivePermute(param0);
auto rotate1 = op::Concatenate(op::CollectivePermute(op::Slice(param0)),
op::CollectivePermute(op::Slice(param0)));
EXPECT_THAT(root,
AllOf(op::Select(_, rotate1, rotate0), op::Shape("f32[3]")));
}
TEST_P(SpmdPartitioningTest,
PartialReplicateSliceAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
ROOT %slice = f32[128,11,257] slice(%param0),
slice={[0:128:1], [2:13:1], [0:257:1]}, sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"));
EXPECT_THAT(root, AllOf(op::Slice(param0), op::Shape("f32[128,11,129]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateSliceAlongPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,14,257] parameter(0), sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
ROOT %slice = f32[63,14,251] slice(%param0),
slice={[2:128:2], [0:14:1], [5:256:1]}, sharding={devices=[1,1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(), op::Shape("f32[128,14,129]"));
EXPECT_THAT(
root,
AllOf(
op::Slice(AllOf(
op::DynamicSlice(
AllOf(op::Concatenate(
op::Slice(param0),
AllOf(op::CollectivePermute(op::Slice(param0)),
op::Shape("f32[128,14,2]"))),
op::Shape("f32[128,14,129]")),
op::Constant(), op::Constant(),
op::Add(op::Multiply(op::Reshape(op::DynamicSlice(
op::Constant(), op::PartitionId())),
op::Constant()),
op::Constant())),
op::Shape("f32[128,14,126]"))),
op::Shape("f32[63,14,126]")));
}
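// Sort partitioning: with maximal-device sharding or sharding only on
// non-sort dimensions, the sort is applied directly to the (local) operands
// without any resharding.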
TEST_P(SpmdPartitioningTest, DeviceMaximalTupleSort) {
absl::string_view hlo_string = R"(
HloModule module
ge {
p.0 = f32[] parameter(0)
p.1 = f32[] parameter(1)
p.2 = s32[] parameter(2)
p.3 = s32[] parameter(3)
ROOT compare = pred[] compare(p.0, p.1), direction=GT
}
ENTRY %main {
%p.0 = f32[3]{0} parameter(0), sharding={maximal device=0}
%iota = s32[3]{0} iota(), iota_dimension=0, sharding={maximal device=0}
ROOT %sort = (f32[3]{0}, s32[3]{0}) sort(p.0, iota), dimensions={0},
to_apply=ge, sharding={{maximal device=0}, {maximal device=0}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Sort(op::Parameter(0), op::Iota()),
op::Shape("(f32[3], s32[3])")));
}
TEST_P(SpmdPartitioningTest, SortAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ge {
p.0.lhs.1247 = f32[]{:T(256)} parameter(0), sharding={replicated}
bitcast-convert = s32[]{:T(256)} bitcast-convert(p.0.lhs.1247), sharding={replicated}
constant = s32[]{:T(256)} constant(0), sharding={replicated}
compare = pred[]{:T(256)} compare(bitcast-convert, constant), direction=LT, sharding={replicated}
constant.1 = u32[]{:T(256)} constant(2147483647), sharding={replicated}
bitcast-convert.1 = u32[]{:T(256)} bitcast-convert(p.0.lhs.1247), sharding={replicated}
subtract = u32[]{:T(256)} subtract(constant.1, bitcast-convert.1), sharding={replicated}
bitcast-convert.2 = s32[]{:T(256)} bitcast-convert(subtract), sharding={replicated}
select = s32[]{:T(256)} select(compare, bitcast-convert.2, bitcast-convert), sharding={replicated}
p.0.rhs.1248 = f32[]{:T(256)} parameter(1), sharding={replicated}
bitcast-convert.3 = s32[]{:T(256)} bitcast-convert(p.0.rhs.1248), sharding={replicated}
compare.1 = pred[]{:T(256)} compare(bitcast-convert.3, constant), direction=LT, sharding={replicated}
bitcast-convert.4 = u32[]{:T(256)} bitcast-convert(p.0.rhs.1248), sharding={replicated}
subtract.1 = u32[]{:T(256)} subtract(constant.1, bitcast-convert.4), sharding={replicated}
bitcast-convert.5 = s32[]{:T(256)} bitcast-convert(subtract.1), sharding={replicated}
select.1 = s32[]{:T(256)} select(compare.1, bitcast-convert.5, bitcast-convert.3), sharding={replicated}
compare.2 = pred[]{:T(256)} compare(select, select.1), direction=GT, sharding={replicated}
compare.258 = pred[]{:T(256)} compare(select.1, select), direction=GT, sharding={replicated}
compare.259 = pred[]{:T(256)} compare(compare.2, compare.258), direction=EQ, sharding={replicated}
p.1.lhs.1249 = s32[]{:T(256)} parameter(2), sharding={replicated}
p.1.rhs.1250 = s32[]{:T(256)} parameter(3), sharding={replicated}
compare.260 = pred[]{:T(256)} compare(p.1.lhs.1249, p.1.rhs.1250), direction=LT, sharding={replicated}
ROOT select.86 = pred[]{:T(256)} select(compare.259, compare.260, compare.2), sharding={replicated}
}
ENTRY entry {
%param0 = f32[128,14,257] parameter(0)
%param0.copy = f32[128,14,257] copy(%param0), sharding={devices=[1,2,1]0,1}
%param1 = s32[128,14,257] parameter(1)
%param1.copy = s32[128,14,257] copy(%param1), sharding={devices=[1,2,1]0,1}
ROOT %sort.6 = (f32[128,14,257]{2,1,0:T(8,128)}, s32[128,14,257]{2,1,0:T(8,128)})
sort(%param0.copy, %param1.copy), dimensions={2}, is_stable=true,
to_apply=%ge, sharding={{devices=[1,2,1]0,1},{devices=[1,2,1]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[128,7,257]"));
auto param1 =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("s32[128,7,257]"));
EXPECT_THAT(root, AllOf(op::Sort(param0, param1),
op::Shape("(f32[128,7,257], s32[128,7,257])")));
}
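// TopK custom-call partitioning: the "TopK" custom call is rewritten to run
// on the local shard, and when the reduced dimension is partitioned the
// per-shard candidates are merged by a final sort; the checks below inspect
// the operand shapes of the rewritten custom-call and sort.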
TEST_P(SpmdPartitioningTest, PartitionCustomCall) {
absl::string_view hlo_string = R"(
HloModule cluster_2013453984438090939__.47
ENTRY %cluster_2013453984438090939__.47
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
custom-call(bf16[2,209664]{1,0} %copy.arg_tuple.1), custom_call_target="TopK"
%get-tuple-element = bf16[2,2000]{1,0}
get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call),
index=0, sharding={replicated}
%get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0},
s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated}
ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})
tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0}
%get-tuple-element.1), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto custom_call = FindInstruction(module.get(), "custom-call.1");
EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 104832);
auto sort = FindInstruction(module.get(), "sort");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 4000);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 4000);
}
TEST_P(SpmdPartitioningTest, PartitionCustomCall_BatchPartitionedDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,32128] parameter(0)
%copy.0 = f32[8,32128] copy(%param0), sharding={devices=[8,1]<=[8]}
%custom-call = (f32[8,2]{1,0}, s32[8,2]{1,0})
custom-call(%copy.0), custom_call_target="TopK"
%get-tuple-element = f32[8,2]{1,0}
get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=0,
sharding={devices=[8,1]<=[8]}
%get-tuple-element.1 = s32[8,2]{1,0}
get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=1,
sharding={devices=[8,1]<=[8]}
ROOT %tuple = (f32[8,2]{1,0}, s32[8,2]{1,0})
tuple(%get-tuple-element, %get-tuple-element.1),
sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto custom_call = FindInstruction(module.get(), "custom-call.1");
EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 32128);
auto sort = FindInstruction(module.get(), "sort");
EXPECT_EQ(sort->operand(0)->shape().dimensions(0), 1);
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 2);
EXPECT_EQ(sort->operand(1)->shape().dimensions(0), 1);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 2);
}
TEST_P(SpmdPartitioningTest, PartitionCustomCall_TwoPartitionedDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,32128] parameter(0)
%copy.0 = f32[8,32128] copy(%param0), sharding={devices=[4,2]<=[8]}
%custom-call = (f32[8,2]{1,0}, s32[8,2]{1,0})
custom-call(%copy.0), custom_call_target="TopK"
%get-tuple-element = f32[8,2]{1,0}
get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=0,
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
%get-tuple-element.1 = s32[8,2]{1,0}
get-tuple-element((f32[8,2]{1,0}, s32[8,2]{1,0}) %custom-call), index=1,
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
ROOT %tuple = (f32[8,2]{1,0}, s32[8,2]{1,0})
tuple(%get-tuple-element, %get-tuple-element.1),
sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto custom_call = FindInstruction(module.get(), "custom-call.1");
EXPECT_EQ(custom_call->operand(0)->shape().dimensions(1), 16064);
auto sort = FindInstruction(module.get(), "sort");
EXPECT_EQ(sort->operand(0)->shape().dimensions(0), 2);
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 4);
EXPECT_EQ(sort->operand(1)->shape().dimensions(0), 2);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 4);
}
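// Sort-based TopK pattern (sort of value plus iota followed by slices): when
// the sort dimension is partitioned, the initial sort runs on the local shard
// and a smaller final sort merges the per-shard candidates. The "NoPartition"
// variants below check that the rewrite is skipped when the pattern does not
// match (second operand not an iota, sort dimension unpartitioned, or the
// slice taken along a different dimension).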
TEST_P(SpmdPartitioningTest, PartitionSortInTopK) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.9: bf16[], p.0.rhs.10: bf16[], p.1.lhs.11:
s32[], p.1.rhs.12: s32[]) -> pred[] {
%p.1.lhs.11 = s32[] parameter(2)
%p.1.rhs.12 = s32[] parameter(3)
%p.0.lhs.9 = bf16[] parameter(0)
%convert.13 = f32[] convert(bf16[] %p.0.lhs.9)
%bitcast-convert.16 = s32[] bitcast-convert(f32[] %convert.13)
%constant.20 = s32[] constant(0)
%compare.21 = pred[] compare(s32[] %bitcast-convert.16, s32[] %constant.20),
direction=LT
%constant.15 = u32[] constant(2147483647)
%bitcast-convert.17 = u32[] bitcast-convert(f32[] %convert.13)
%subtract.18 = u32[] subtract(u32[] %constant.15, u32[] %bitcast-convert.17)
%bitcast-convert.19 = s32[] bitcast-convert(u32[] %subtract.18)
%select.22 = s32[] select(pred[] %compare.21, s32[] %bitcast-convert.19, s32[]
%bitcast-convert.16)
%p.0.rhs.10 = bf16[] parameter(1)
%convert.14 = f32[] convert(bf16[] %p.0.rhs.10)
%bitcast-convert.24 = s32[] bitcast-convert(f32[] %convert.14)
%constant.28 = s32[] constant(0)
%compare.29 = pred[] compare(s32[] %bitcast-convert.24, s32[] %constant.28),
direction=LT
%constant.23 = u32[] constant(2147483647)
%bitcast-convert.25 = u32[] bitcast-convert(f32[] %convert.14)
%subtract.26 = u32[] subtract(u32[] %constant.23, u32[] %bitcast-convert.25)
%bitcast-convert.27 = s32[] bitcast-convert(u32[] %subtract.26)
%select.30 = s32[] select(pred[] %compare.29, s32[] %bitcast-convert.27, s32[]
%bitcast-convert.24)
ROOT %compare.31 = pred[] compare(s32[] %select.22, s32[] %select.30),
direction=GT
}
ENTRY entry
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%iota.7 = s32[2,209664] iota(), iota_dimension=1,
metadata={op_type="TopKV2" op_name="TopKV2"}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[2,2000] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[2,2000] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
tuple(bf16[2,2000] %slice.34, s32[2,2000]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 104832);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 104832);
auto final_sort = FindInstruction(module.get(), "sort.1");
EXPECT_EQ(final_sort->operand(0)->shape().dimensions(1), 4000);
EXPECT_EQ(final_sort->operand(1)->shape().dimensions(1), 4000);
}
TEST_P(SpmdPartitioningTest, PartitionSortInTopKWhenComparisonWithSelect) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
%p.0.lhs.2566 = bf16[] parameter(0)
%convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
%bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
%constant.285 = s32[] constant(0)
%compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
direction=LT
%constant.286 = u32[] constant(2147483647)
%bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
%subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
%bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
%select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
s32[] %bitcast-convert.48)
%p.0.rhs.2567 = bf16[] parameter(1)
%convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
%bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
%compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
direction=LT
%bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
%subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
%bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
%select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
s32[] %bitcast-convert.51)
%compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
%compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
%compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
direction=EQ
%p.1.lhs.2586 = s32[] parameter(2)
%p.1.rhs.2587 = s32[] parameter(3)
%compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
direction=LT
ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
pred[] %compare.86)
}
ENTRY entry
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%iota.7 = s32[2,209664] iota(), iota_dimension=1,
metadata={op_type="TopKV2" op_name="TopKV2"}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[2,2000] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[2,2000] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
tuple(bf16[2,2000] %slice.34, s32[2,2000]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 104832);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 104832);
auto final_sort = FindInstruction(module.get(), "sort.1");
EXPECT_EQ(final_sort->operand(0)->shape().dimensions(1), 4000);
EXPECT_EQ(final_sort->operand(1)->shape().dimensions(1), 4000);
}
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenSecondOperandIsNotIota) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
%p.0.lhs.2566 = bf16[] parameter(0)
%convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
%bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
%constant.285 = s32[] constant(0)
%compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
direction=LT
%constant.286 = u32[] constant(2147483647)
%bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
%subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
%bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
%select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
s32[] %bitcast-convert.48)
%p.0.rhs.2567 = bf16[] parameter(1)
%convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
%bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
%compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
direction=LT
%bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
%subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
%bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
%select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
s32[] %bitcast-convert.51)
%compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
%compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
%compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
direction=EQ
%p.1.lhs.2586 = s32[] parameter(2)
%p.1.rhs.2587 = s32[] parameter(3)
%compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
direction=LT
ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
pred[] %compare.86)
}
ENTRY entry {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%arg_tuple.2 = s32[2,209664] parameter(1)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %arg_tuple.2),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[2,2000] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[2,2000] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
tuple(bf16[2,2000] %slice.34, s32[2,2000]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenNoPartitionInSortDim) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
%p.0.lhs.2566 = bf16[] parameter(0)
%convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
%bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
%constant.285 = s32[] constant(0)
%compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
direction=LT
%constant.286 = u32[] constant(2147483647)
%bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
%subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
%bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
%select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
s32[] %bitcast-convert.48)
%p.0.rhs.2567 = bf16[] parameter(1)
%convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
%bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
%compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
direction=LT
%bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
%subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
%bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
%select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
s32[] %bitcast-convert.51)
%compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
%compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
%compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
direction=EQ
%p.1.lhs.2586 = s32[] parameter(2)
%p.1.rhs.2587 = s32[] parameter(3)
%compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
direction=LT
ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
pred[] %compare.86)
}
ENTRY entry
(arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[2,1]0,1}
%iota.7 = s32[2,209664] iota(), iota_dimension=1,
metadata={op_type="TopKV2" op_name="TopKV2"}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[2,2000] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[2,2000] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:2], [0:2000]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[2,2000], s32[2,2000])
tuple(bf16[2,2000] %slice.34, s32[2,2000]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
TEST_P(SpmdPartitioningTest, NoPartitionSortInTopKWhenSliceInOtherDim) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.8 (p.0.lhs.2566: bf16[],
p.0.rhs.2567: bf16[], p.1.lhs.2586: s32[], p.1.rhs.2587: s32[]) -> pred[] {
%p.0.lhs.2566 = bf16[] parameter(0)
%convert.164 = f32[] convert(bf16[] %p.0.lhs.2566)
%bitcast-convert.48 = s32[] bitcast-convert(f32[] %convert.164)
%constant.285 = s32[] constant(0)
%compare.84 = pred[] compare(s32[] %bitcast-convert.48, s32[] %constant.285),
direction=LT
%constant.286 = u32[] constant(2147483647)
%bitcast-convert.49 = u32[] bitcast-convert(f32[] %convert.164)
%subtract.84 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.49)
%bitcast-convert.50 = s32[] bitcast-convert(u32[] %subtract.84)
%select.40 = s32[] select(pred[] %compare.84, s32[] %bitcast-convert.50,
s32[] %bitcast-convert.48)
%p.0.rhs.2567 = bf16[] parameter(1)
%convert.165 = f32[] convert(bf16[] %p.0.rhs.2567)
%bitcast-convert.51 = s32[] bitcast-convert(f32[] %convert.165)
%compare.85 = pred[] compare(s32[] %bitcast-convert.51, s32[] %constant.285),
direction=LT
%bitcast-convert.52 = u32[] bitcast-convert(f32[] %convert.165)
%subtract.85 = u32[] subtract(u32[] %constant.286, u32[] %bitcast-convert.52)
%bitcast-convert.53 = s32[] bitcast-convert(u32[] %subtract.85)
%select.41 = s32[] select(pred[] %compare.85, s32[] %bitcast-convert.53,
s32[] %bitcast-convert.51)
%compare.86 = pred[] compare(s32[] %select.40, s32[] %select.41), direction=GT
%compare.1645 = pred[] compare(s32[] %select.41, s32[] %select.40), direction=GT
%compare.1646 = pred[] compare(pred[] %compare.86, pred[] %compare.1645),
direction=EQ
%p.1.lhs.2586 = s32[] parameter(2)
%p.1.rhs.2587 = s32[] parameter(3)
%compare.1647 = pred[] compare(s32[] %p.1.lhs.2586, s32[] %p.1.rhs.2587),
direction=LT
ROOT %select.1054 = pred[] select(pred[] %compare.1646, pred[] %compare.1647,
pred[] %compare.86)
}
ENTRY entry {
%arg_tuple.1 = bf16[2,209664] parameter(0)
%copy.arg_tuple.1 = bf16[2,209664] copy(%arg_tuple.1), sharding={devices=[1,2]0,1}
%iota.7 = s32[2,209664] iota(), iota_dimension=1,
metadata={op_type="TopKV2" op_name="TopKV2"}
%sort.32 = (bf16[2,209664], s32[2,209664])
sort(bf16[2,209664] %copy.arg_tuple.1, s32[2,209664] %iota.7),
dimensions={1}, is_stable=true, to_apply=%compare-greater-than.8,
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.33 = bf16[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=0, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.34 = bf16[1,209664] slice(bf16[2,209664]
%get-tuple-element.33), slice={[0:1], [0:209664]},
metadata={op_type="TopKV2" op_name="TopKV2"}
%get-tuple-element.35 = s32[2,209664]
get-tuple-element((bf16[2,209664], s32[2,209664]) %sort.32),
index=1, metadata={op_type="TopKV2" op_name="TopKV2"}
%slice.36 = s32[1,209664] slice(s32[2,209664]
%get-tuple-element.35), slice={[0:1], [0:209664]},
metadata={op_type="TopKV2" op_name="TopKV2"}
ROOT %tuple.46 = (bf16[1,209664], s32[1,209664])
tuple(bf16[1,209664] %slice.34, s32[1,209664]
%slice.36), sharding={{replicated}, {replicated}},
metadata={op_name="XLA_Retvals"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.0");
EXPECT_EQ(sort->operand(0)->shape().dimensions(1), 209664);
EXPECT_EQ(sort->operand(1)->shape().dimensions(1), 209664);
}
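// Sorts sharded along the sort dimension itself: the partitioner reshards the
// operands so the sort dimension becomes local, typically moving the sharding
// onto a free dimension (falling back to replication when no free dimension
// can absorb it); the operand-shape checks verify which dimension absorbed
// the partitioning.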
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_SlowSortBug) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[32768,65536]{1,0})->(f32[32768,65536]{1,0}, s32[32768,65536]{1,0})}
region_174.7326 {
Arg_0.7327 = f32[] parameter(0), sharding={replicated}
compare.7339 = pred[] compare(Arg_0.7327, Arg_0.7327), direction=NE, sharding={replicated}
constant.7332 = s32[] constant(2143289344), sharding={replicated}
constant.7334 = f32[] constant(0), sharding={replicated}
compare.7337 = pred[] compare(Arg_0.7327, constant.7334), direction=EQ, sharding={replicated}
constant.7333 = s32[] constant(0), sharding={replicated}
bitcast-convert.7335 = s32[] bitcast-convert(Arg_0.7327), sharding={replicated}
select.7338 = s32[] select(compare.7337, constant.7333, bitcast-convert.7335), sharding={replicated}
select.7340 = s32[] select(compare.7339, constant.7332, select.7338), sharding={replicated}
constant.1127 = s32[] constant(0), sharding={replicated}
compare.7343 = pred[] compare(select.7340, constant.1127), direction=LT, sharding={replicated}
constant.7331 = u32[] constant(2147483647), sharding={replicated}
bitcast-convert.7336 = u32[] bitcast-convert(Arg_0.7327), sharding={replicated}
subtract.7341 = u32[] subtract(constant.7331, bitcast-convert.7336), sharding={replicated}
bitcast-convert.7342 = s32[] bitcast-convert(subtract.7341), sharding={replicated}
select.7344 = s32[] select(compare.7343, bitcast-convert.7342, select.7340), sharding={replicated}
Arg_1.7328 = f32[] parameter(1), sharding={replicated}
compare.7349 = pred[] compare(Arg_1.7328, Arg_1.7328), direction=NE, sharding={replicated}
constant.1125 = s32[] constant(2143289344), sharding={replicated}
constant.1126 = f32[] constant(0), sharding={replicated}
compare.7347 = pred[] compare(Arg_1.7328, constant.1126), direction=EQ, sharding={replicated}
constant.1128 = s32[] constant(0), sharding={replicated}
bitcast-convert.7345 = s32[] bitcast-convert(Arg_1.7328), sharding={replicated}
select.7348 = s32[] select(compare.7347, constant.1128, bitcast-convert.7345), sharding={replicated}
select.7350 = s32[] select(compare.7349, constant.1125, select.7348), sharding={replicated}
constant.1129 = s32[] constant(0), sharding={replicated}
compare.7353 = pred[] compare(select.7350, constant.1129), direction=LT, sharding={replicated}
constant.1130 = u32[] constant(2147483647), sharding={replicated}
bitcast-convert.7346 = u32[] bitcast-convert(Arg_1.7328), sharding={replicated}
subtract.7351 = u32[] subtract(constant.1130, bitcast-convert.7346), sharding={replicated}
bitcast-convert.7352 = s32[] bitcast-convert(subtract.7351), sharding={replicated}
select.7354 = s32[] select(compare.7353, bitcast-convert.7352, select.7350), sharding={replicated}
compare.7355 = pred[] compare(select.7344, select.7354), direction=LT, sharding={replicated}
compare.24 = pred[] compare(select.7354, select.7344), direction=LT, sharding={replicated}
compare.25 = pred[] compare(compare.7355, compare.24), direction=EQ, sharding={replicated}
Arg_2.7329 = s32[] parameter(2), sharding={replicated}
Arg_3.7330 = s32[] parameter(3), sharding={replicated}
compare.26 = pred[] compare(Arg_2.7329, Arg_3.7330), direction=LT, sharding={replicated}
ROOT select.21 = pred[] select(compare.25, compare.26, compare.7355), sharding={replicated}
}
ENTRY entry {
param.0 = f32[32768,65536]{1,0} parameter(0)
negate.7325 = f32[32768,65536]{1,0} negate(param.0), sharding={devices=[1,64]<=[64]}
iota.30 = s32[32768,65536]{1,0} iota(), iota_dimension=1, sharding={devices=[1,64]<=[64]}
ROOT sort.0 = (f32[32768,65536]{1,0}, s32[32768,65536]{1,0}) sort(negate.7325, iota.30), dimensions={1}, is_stable=true, to_apply=region_174.7326, sharding={{devices=[1,64]<=[64]}, {devices=[1,64]<=[64]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 64));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 512);
EXPECT_EQ(operand->shape().dimensions(1), 65536);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneOperand) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->f32[1024,1024]{1,0}}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
ROOT sort.0 = f32[1024,1024]{1,0} sort(negate.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={devices=[1,8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoOperands) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0},s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoOperands_FreeDimOfSize1) {
absl::string_view hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1,1024]{1,0} parameter(0)
negate.0 = f32[1,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1,1024]{1,0}, s32[1,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
for (HloInstruction* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kSort) {
for (HloInstruction* operand : inst->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 1);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
EXPECT_THAT(inst, op::Sort(op::AllReduce(), op::AllReduce()));
}
EXPECT_NE(inst->opcode(), HloOpcode::kAllToAll);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_ThreeOperands) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0},s32[1024,1024]{1,0},s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
p.2.lhs = s32[] parameter(4), sharding={replicated}
p.2.rhs = s32[] parameter(5), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]<=[8]}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=0, sharding={devices=[1,8]<=[8]}
iota.1 = s32[1024,1024]{1,0} iota(), iota_dimension=1, sharding={devices=[1,8]<=[8]}
ROOT sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0, iota.1), dimensions={1}, is_stable=true, to_apply=compare, sharding={{devices=[1,8]<=[8]},{devices=[1,8]<=[8]},{devices=[1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 128);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_RankOne) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0},s32[1024]{0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024]{0} parameter(0)
negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]<=[8]}
iota.0 = s32[1024]{0} iota(), iota_dimension=0
ROOT sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_TwoFreeDivisibleDims) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[8,1024,1024]{2,1,0})->(f32[8,1024,1024]{2,1,0},s32[8,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[8,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[8,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,1,8]<=[8]}
iota.0 = s32[8,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,1,8]<=[8]}
ROOT sort.0 = (f32[8,1024,1024]{2,1,0}, s32[8,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,1,8]<=[8]},{devices=[1,1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 1);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneFreeDivisibleDim) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[7,1024,1024]{2,1,0})->(f32[7,1024,1024]{2,1,0},s32[7,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[7,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[7,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,1,8]<=[8]}
iota.0 = s32[7,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,1,8]<=[8]}
ROOT sort.0 = (f32[7,1024,1024]{2,1,0}, s32[7,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,1,8]<=[8]},{devices=[1,1,8]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 7);
EXPECT_EQ(operand->shape().dimensions(1), 128);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_OneFreeNondivisibleDim) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[7,1024,1024]{2,1,0})->(f32[7,1024,1024]{2,1,0},s32[7,1024,1024]{2,1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[7,1024,1024]{2,1,0} parameter(0)
negate.0 = f32[7,1024,1024]{2,1,0} negate(param.0), sharding={devices=[1,2,4]<=[8]}
iota.0 = s32[7,1024,1024]{2,1,0} iota(), iota_dimension=2, sharding={devices=[1,2,4]<=[8]}
ROOT sort.0 = (f32[7,1024,1024]{2,1,0}, s32[7,1024,1024]{2,1,0}) sort(negate.0, iota.0), dimensions={2}, is_stable=true, to_apply=compare, sharding={{devices=[1,2,4]<=[8]},{devices=[1,2,4]<=[8]}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 2);
EXPECT_EQ(operand->shape().dimensions(1), 512);
EXPECT_EQ(operand->shape().dimensions(2), 1024);
}
}
TEST_P(SpmdPartitioningTest, SortShardedOnSortDim_LastTileDimReplicate) {
absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->f32[1024,1024]{1,0}}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT sort.0 = f32[1024,1024]{1,0} sort(negate.0), dimensions={1}, is_stable=true, to_apply=compare, sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto sort = FindInstruction(module.get(), "sort.1");
for (auto operand : sort->operands()) {
EXPECT_EQ(operand->shape().dimensions(0), 512);
EXPECT_EQ(operand->shape().dimensions(1), 1024);
}
}
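// Transpose partitioning: when the output sharding is the input sharding with
// its dimensions permuted, the transpose is applied directly to the local
// shard; otherwise the operand is resharded first (e.g. via all-to-all).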
TEST_P(SpmdPartitioningTest, ShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0), sharding={devices=[1,2,1,1]0,1}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2}, sharding={devices=[1,1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[16,4,19,38]")));
}
TEST_P(SpmdPartitioningTest, MultiDimensionShardedTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[4,2,1,1]<=[8]}
ROOT %transpose = f32[38,4,16,38] transpose(%param0.copy),
dimensions={1,3,0,2}, sharding={devices=[2,1,4,1]<=[4,2]T(1,0)}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[19,4,4,38]")));
}
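// The requested output sharding does not map onto the operand's sharded
// dimension, so the operand is resharded (reshape/all-to-all/transpose)
// before a local transpose is applied.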
TEST_P(SpmdPartitioningTest, NonShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0), sharding={devices=[1,2,1,1]0,1}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2}, sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
  auto reshard = AllOf(op::Reshape(op::Transpose(op::Reshape(op::AllToAll()))),
op::Shape("f32[16,38,38,2]"));
EXPECT_THAT(root, AllOf(op::Transpose(), op::Shape("f32[16,2,38,38]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2},
sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[16,4,19,38]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateNonShardableTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %transpose = f32[16,4,38,38] transpose(%param0.copy),
dimensions={0,3,1,2},
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
  auto reshard = AllOf(op::Reshape(op::Transpose(op::Reshape(op::AllToAll()))),
op::Shape("f32[16,38,38,2]"));
EXPECT_THAT(root, AllOf(op::Transpose(), op::Shape("f32[16,2,38,38]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateMultiDimensionShardedTranspose) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[16,38,38,4] parameter(0)
%param0.copy = f32[16,38,38,4] copy(%param0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
ROOT %transpose = f32[38,4,16,38] transpose(%param0.copy),
dimensions={1,3,0,2},
sharding={devices=[2,1,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[8,19,38,4]"));
EXPECT_THAT(root, AllOf(op::Transpose(param0), op::Shape("f32[19,4,8,38]")));
}
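// The sharded dimension is unaffected by the shape change, so the reshape is
// performed locally on each shard.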
TEST_P(SpmdPartitioningTest, ShardableReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0)
%param0.copy = f32[38,38,324] copy(%param0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,4,81] reshape(%param0.copy),
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[19,38,324]"));
EXPECT_THAT(root, AllOf(op::Reshape(param0), op::Shape("f32[19,38,4,81]")));
}
TEST_P(SpmdPartitioningTest, ReshapePartialHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[4,14,4] parameter(0), sharding={devices=[2,4,2]<=[16]}
ROOT %reshape = f32[2,2,2,7,2,2] reshape(%param0),
sharding={devices=[2,1,4,1,2,1]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto halo_exchange =
AllOf(op::Concatenate(op::Copy(op::Parameter()), op::CollectivePermute(),
op::CollectivePermute(), op::CollectivePermute()));
EXPECT_THAT(
root,
AllOf(op::Reshape(op::DynamicSlice(op::Pad(halo_exchange, _), _, _, _)),
op::Shape("f32[1,2,1,7,1,2]")));
}
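// The operand is sharded on dimension 0 but the output wants dimension 1
// sharded, so the input is resharded via an all-to-all before the local
// reshape.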
TEST_P(SpmdPartitioningTest, ReshapeWithReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,4,81] reshape(%param0),
sharding={devices=[1,2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input_reshard =
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(0)))));
EXPECT_THAT(root,
AllOf(op::Reshape(input_reshard), op::Shape("f32[38,19,4,81]")));
}
TEST_P(SpmdPartitioningTest, ReshapeWithReshard2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0), sharding={devices=[2,1,1]0,1}
ROOT %reshape = f32[38,38,2,162] reshape(%param0),
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto local_reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("f32[19,38,2,162]"));
EXPECT_THAT(root, AllOf(op::Shape("f32[38,38,2,81]"),
op::Reshape(op::Transpose(
op::AllToAll(op::Reshape(local_reshape))))));
}
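// The 16x8 input sharding cannot be mapped onto the merged output dimension
// directly: the shards of the second sharded dimension are first reassembled
// with a dynamic-update-slice plus all-reduce, the result is reshaped
// locally, and a dynamic-slice produces the requested 128-way output
// sharding.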
TEST_P(SpmdPartitioningTest, ReshapeWithReshard3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %reshape {
p0 = bf16[80,64,2,2,2,2,2] parameter(0), sharding={devices=[16,8,1,1,1,1,1]<=[128]}
ROOT reshape = bf16[5120,4,8] reshape(p0), sharding={devices=[128,1,1]<=[128]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto reshape = AllOf(op::Reshape(op::AllReduce(op::DynamicUpdateSlice(
_, op::Parameter(0), _, _, _, _, _, _, _))),
op::Shape("bf16[320,4,8]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(reshape, _, _, _),
op::Shape("bf16[40,4,8]")));
}
TEST_P(SpmdPartitioningTest, ReshapeWithReshard4) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %reshape {
p0 = bf16[80,64,8,2,2,2,2] parameter(0), sharding={devices=[16,1,8,1,1,1,1]<=[128]}
ROOT reshape = bf16[5120,16,8] reshape(p0), sharding={devices=[128,1,1]<=[128]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Reshape(op::Reshape(op::Transpose(op::AllToAll()))),
op::Shape("bf16[40,16,8]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateShardableReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[38,38,324] parameter(0)
%param0.copy = f32[38,38,324] copy(%param0),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %reshape = f32[38,38,4,81] reshape(%param0.copy),
sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[19,38,324]"));
EXPECT_THAT(root, AllOf(op::Reshape(param0), op::Shape("f32[19,38,4,81]")));
}
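// Merging a dimension of size 7 that is split unevenly (4 + 3) across two
// partitions requires a halo exchange: a slice is collective-permuted from
// the neighboring partition and concatenated before the final dynamic-slice.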
TEST_P(SpmdPartitioningTest, ReshapeMergeDimsWithHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[2,3,7,10] parameter(0), sharding={devices=[1,1,2,1]0,1}
ROOT %reshape = s32[3,2,1,14,5] reshape(%input),
sharding={devices=[1,1,1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("s32[3,2,1,8,5]"));
auto halo = op::CollectivePermute(op::Slice(reshape));
auto exchanged = op::DynamicSlice(op::Concatenate(halo, op::Slice(reshape)),
_, _, _, _, _);
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(exchanged, op::Shape("s32[3,2,1,7,5]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicateReshapeMergeDimsWithHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[2,3,7,10] parameter(0),
sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
ROOT %reshape = s32[3,2,1,14,5] reshape(%input),
sharding={devices=[1,1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto reshape =
AllOf(op::Reshape(op::Parameter(0)), op::Shape("s32[3,2,1,8,5]"));
auto halo = op::CollectivePermute(op::Slice(reshape));
auto exchanged = op::DynamicSlice(op::Concatenate(halo, op::Slice(reshape)),
_, _, _, _, _);
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(exchanged, op::Shape("s32[3,2,1,7,5]")));
}
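// The input's dimension of size 2 is tiled 8 ways, so most partitions only
// hold padding; resharding to the 2-way, partially replicated output uses a
// collective-permute halo exchange plus selects (to mask the padding) and an
// all-reduce.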
TEST_P(SpmdPartitioningTest, TileToPartialReplicateHaloExchangeWithPadding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,123]{1,0} parameter(0), sharding={devices=[8,1]<=[8]}
ROOT %reshape = f32[2,1,123]{2,1,0} reshape(%input),
sharding={devices=[2,1,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto reshape = AllOf(op::Reshape(op::AllReduce(op::Select(
_,
op::Select(_, op::CollectivePermute(op::Parameter()),
op::Parameter()),
_))),
op::Shape("f32[1,1,123]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, reshape);
}
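// A reduce-window with LHS dilation and padding, partitioned 4 ways along a
// spatial dimension: each partition concatenates a halo from its neighbor,
// masks out positions that belong to padding/dilation, runs the local
// reduce-window, and dynamic-slices the valid part of the result.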
TEST_P(SpmdPartitioningTest, InceptionV3_4_way_ReduceWindowDilated) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%param0 = f32[128,5,5,768] parameter(0)
%param0.copy = f32[128,5,5,768] copy(%param0),
sharding={devices=[1,4,1,1]<=[4]}
%constant.1 = f32[] constant(0), sharding={replicated}
ROOT %rw = f32[128,17,17,768] reduce-window(%param0.copy, %constant.1),
window={size=1x5x5x1 pad=0_0x4_4x4_4x0_0 lhs_dilate=1x3x3x1},
to_apply=sum, sharding={devices=[1,4,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input_shard = op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(0), op::Constant()), op::Constant(), op::Reshape(),
op::Constant(), op::Constant()));
auto id_mul4_add1 =
op::Add(op::Multiply(op::Reshape(), op::Constant()), op::Constant());
auto id_mul5 = op::Multiply(op::Reshape(), op::Constant());
auto id_mul5_add1_div3 =
op::Divide(op::Add(id_mul5, op::Constant()), op::Constant());
auto before_masking = AllOf(
op::Shape("f32[128,3,5,768]"),
op::DynamicSlice(
AllOf(
op::Shape("f32[128,4,5,768]"),
op::Concatenate(op::CollectivePermute(input_shard), input_shard)),
op::Constant(),
op::Subtract(op::Constant(),
op::Subtract(id_mul4_add1, id_mul5_add1_div3)),
op::Constant(), op::Constant()));
auto masked = op::Select(
op::And(op::Compare(op::Add(op::Iota(), op::Broadcast(id_mul5_add1_div3)),
op::Broadcast(op::Constant())),
op::Compare(op::Add(op::Iota(), op::Broadcast(id_mul5_add1_div3)),
op::Broadcast(op::Constant()))),
before_masking, op::Broadcast(op::Constant()));
auto rw = AllOf(op::Shape("f32[128,7,17,768]"),
op::ReduceWindow(masked, op::Constant()));
auto final_slice_index = op::Subtract(
id_mul5,
op::Add(op::Multiply(id_mul5_add1_div3, op::Constant()), op::Constant()));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("f32[128,5,17,768]"),
op::DynamicSlice(rw, op::Constant(), final_slice_index,
op::Constant(), op::Constant())));
}
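// The reduced dimensions are unsharded and the kept dimension's sharding
// matches the output sharding, so the reduce stays local with no
// cross-partition communication.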
TEST_P(SpmdPartitioningTest, TiledToTiledReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%param0 = f32[4,32,32,128] parameter(0)
%param0.copy = f32[4,32,32,128] copy(%param0),
sharding={devices=[1,1,1,2]0,1}
%constant.1 = f32[] constant(0), sharding={replicated}
%reduce = f32[128] reduce(%param0.copy, %constant.1), dimensions={0,1,2},
to_apply=%sum, sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[4,32,32,64]"));
EXPECT_THAT(root,
AllOf(op::Reduce(param0, op::Constant()), op::Shape("f32[64]")));
}
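// The reduced dimension is sharded, so each partition reduces its local
// slice and an all-reduce combines the partial results, keeping the output
// partially replicated.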
TEST_P(SpmdPartitioningTest, PartialTiledToPartialTiledReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%param0 = f32[4,4] parameter(0),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
%constant.1 = f32[] constant(0), sharding={replicated}
ROOT %reduce = f32[4] reduce(%param0, %constant.1), dimensions={0},
to_apply=%sum,
sharding={devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Reduce(op::Parameter(0), op::Constant())),
op::Shape("f32[2]")));
}
TEST_P(SpmdPartitioningTest, DeviceMaximalTupleReduce) {
absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0), sharding={maximal device=0}
%param1 = s32[28,10] parameter(1), sharding={maximal device=0}
%init0 = f32[] parameter(2), sharding={maximal device=0}
%init1 = s32[] parameter(3), sharding={maximal device=0}
ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func,
sharding={{maximal device=0}, {maximal device=0}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Reduce(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3)),
op::Shape("(f32[28], s32[28])")));
}
TEST_P(SpmdPartitioningTest, TiledToTiledTupleReduce) {
absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0), sharding={devices=[2,1]0,1}
%param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1}
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func,
sharding={{devices=[2]0,1}, {devices=[2]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Reduce(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3)),
op::Shape("(f32[14], s32[14])")));
}
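// A variadic (argmax-style) tuple reduce over a sharded dimension cannot be
// combined with a plain all-reduce, so each partition's local tuple result
// is gathered across the reduced mesh dimension (dynamic-update-slice +
// all-reduce) and reduced once more.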
TEST_P(SpmdPartitioningTest, TiledToPartiallyTiledTupleReduce) {
absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,12] parameter(0), sharding={devices=[2,4]<=[8]}
%param1 = s32[28,12] parameter(1), sharding={devices=[2,4]<=[8]}
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func,
sharding={{devices=[2,4]<=[8] last_tile_dim_replicate},
{devices=[2,4]<=[8] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[14,3]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("s32[14,3]"), op::Parameter(1));
auto local_reduce =
AllOf(op::Reduce(lhs, rhs, op::Parameter(2), op::Parameter(3)),
op::Shape("(f32[14], s32[14])"));
auto reshape_l = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
op::Shape("f32[14,1]"));
auto reshape_r = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
op::Shape("s32[14,1]"));
auto broadcast_l =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_l, _, _)),
op::Shape("f32[14,4]"));
auto broadcast_r =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_r, _, _)),
op::Shape("s32[14,4]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Reduce(broadcast_l, broadcast_r, op::Parameter(2),
op::Parameter(3)),
op::Shape("(f32[14], s32[14])")));
}
TEST_P(SpmdPartitioningTest, TupleReduceSubgroupManual) {
absl::string_view hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,12] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
%param1 = s32[28,12] parameter(1),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
%init0 = f32[] parameter(2),
sharding={devices=[2,2]<=[4] last_tile_dims={replicated,manual}}
%init1 = s32[] parameter(3),
sharding={devices=[2,2]<=[4] last_tile_dims={replicated,manual}}
ROOT %reduce = (f32[28], s32[28]) reduce(%param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func,
sharding={{devices=[1,2,2]<=[4] last_tile_dims={replicated,manual}},
{devices=[1,2,2]<=[4] last_tile_dims={replicated,manual}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[28,6]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("s32[28,6]"), op::Parameter(1));
auto local_reduce =
AllOf(op::Reduce(lhs, rhs, op::Parameter(2), op::Parameter(3)),
op::Shape("(f32[28], s32[28])"));
auto reshape_l = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
op::Shape("f32[28,1]"));
auto reshape_r = AllOf(op::Reshape(op::GetTupleElement(local_reduce)),
op::Shape("s32[28,1]"));
auto broadcast_l =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_l, _, _)),
op::Shape("f32[28,2]"));
auto broadcast_r =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, reshape_r, _, _)),
op::Shape("s32[28,2]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Reduce(broadcast_l, broadcast_r, op::Parameter(2),
op::Parameter(3)),
op::Shape("(f32[28], s32[28])")));
}
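// The reduce runs on the dimension-1-sharded operand (partial results
// combined with an all-reduce) and the replicated result is then
// dynamic-sliced to match the requested 2-way output sharding.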
TEST_P(SpmdPartitioningTest, TiledToTiledReduceOutputReshard) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%param0 = f32[4,32,32,128] parameter(0)
%param0.copy = f32[4,32,32,128] copy(%param0),
sharding={devices=[1,2,1,1]0,1}
%constant.1 = f32[] constant(0), sharding={replicated}
%reduce = f32[128] reduce(%param0.copy, %constant.1), dimensions={0,1,2},
to_apply=%sum, sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4,16,32,128]"));
EXPECT_THAT(root,
AllOf(op::DynamicSlice(
AllOf(op::AllReduce(op::Reduce(param0, op::Constant())),
op::Shape("f32[128]")),
op::Reshape()),
op::Shape("f32[64]")));
}
TEST_P(SpmdPartitioningTest, IotaAlongNonTileDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
ROOT %iota = s32[16,80,91] iota(), iota_dimension=1,
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Iota(), op::Shape("s32[16,80,46]")));
}
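// An iota along the tiled dimension becomes a local iota plus a broadcast of
// each partition's start offset.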
TEST_P(SpmdPartitioningTest, IotaAlongTileDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
ROOT %iota = s32[16,80,91] iota(), iota_dimension=2,
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Add(op::Iota(), op::Broadcast()),
op::Shape("s32[16,80,46]")));
}
TEST_P(SpmdPartitioningTest, U32IotaAlongTileDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
ROOT %iota = u32[16,80,91] iota(), iota_dimension=2,
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Add(op::Iota(), op::Broadcast()),
op::Shape("u32[16,80,46]")));
}
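// The predicate lives on a single device and is broadcast to all partitions
// via an all-reduce; each branch computation is partitioned according to its
// own shardings.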
TEST_P(SpmdPartitioningTest, Conditional) {
absl::string_view hlo_string = R"(
HloModule module
Negate {
x = f32[4,5] parameter(0), sharding={devices=[2,1]0,1}
ROOT negate = f32[4,5] negate(x), sharding={devices=[2,1]0,1}
}
Identity {
y = f32[4,5] parameter(0), sharding={devices=[2,1]0,1}
ROOT copy = f32[4,5] copy(y), sharding={devices=[2,1]0,1}
}
ENTRY entry {
%param.0 = pred[] parameter(0)
%param.0.copy = pred[] copy(%param.0), sharding={maximal device=0}
%param.1 = f32[4,5] parameter(1)
%param.1.copy = f32[4,5] copy(%param.1), sharding={replicated}
%param.2 = f32[4,5] parameter(2)
%param.2.copy = f32[4,5] copy(%param.2), sharding={devices=[2,1]0,1}
ROOT cond = f32[4,5] conditional(%param.0.copy, %param.1.copy, %param.2.copy),
true_computation=Negate, false_computation=Identity,
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
  auto param0 = AllOf(op::Copy(op::Copy(op::Parameter())), op::Shape("pred[]"));
auto param1 = AllOf(op::Copy(op::Parameter()), op::Shape("f32[4,5]"));
auto param2 = AllOf(op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant())),
op::Shape("f32[2,5]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Conditional(op::AllReduce(), param1, param2),
op::Shape("f32[2,5]")));
auto then_branch_root = root->branch_computation(0)->root_instruction();
EXPECT_THAT(then_branch_root,
AllOf(op::Negate(op::DynamicSlice(op::Parameter(), op::Reshape(),
op::Constant())),
op::Shape("f32[2,5]")));
auto else_branch_root = root->branch_computation(1)->root_instruction();
EXPECT_THAT(else_branch_root,
AllOf(op::Copy(op::Parameter()), op::Shape("f32[2,5]")));
}
TEST_P(SpmdPartitioningTest, ConditionalManual) {
absl::string_view hlo_string = R"(
HloModule module
Negate {
x = f32[4,5] parameter(0), sharding={manual}
ROOT negate = f32[4,5] negate(x), sharding={manual}
}
Identity {
y = f32[4,5] parameter(0), sharding={manual}
ROOT copy = f32[4,5] copy(y), sharding={manual}
}
ENTRY entry {
%param.0 = pred[] parameter(0), sharding={manual}
%param.1 = f32[4,5] parameter(1), sharding={manual}
%param.2 = f32[4,5] parameter(2), sharding={manual}
ROOT cond = f32[4,5] conditional(%param.0, %param.1, %param.2),
true_computation=Negate, false_computation=Identity, sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto param0 = AllOf(op::Parameter(0), op::Shape("pred[]"));
auto param1 = AllOf(op::Parameter(1), op::Shape("f32[4,5]"));
auto param2 = AllOf(op::Parameter(2), op::Shape("f32[4,5]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Conditional(param0, param1, param2),
op::Shape("f32[4,5]")));
}
TEST_P(SpmdPartitioningTest, ConditionalPartialManual) {
absl::string_view hlo_string = R"(
HloModule module
Negate {
x = f32[4] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
ROOT negate = f32[4] negate(x), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
}
Identity {
y = f32[4] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
ROOT copy = f32[4] copy(y), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
}
ENTRY entry {
%param.0 = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={replicated, manual}}
%param.1 = f32[4] parameter(1), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
%param.2 = f32[4] parameter(2), sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
ROOT cond = f32[4] conditional(%param.0, %param.1, %param.2),
true_computation=Negate, false_computation=Identity, sharding={devices=[2,2]<=[4] last_tile_dims={manual}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto param0 = AllOf(op::Parameter(0), op::Shape("pred[]"));
auto param1 = AllOf(op::Parameter(1), op::Shape("f32[2]"));
auto param2 = AllOf(op::Parameter(2), op::Shape("f32[2]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Conditional(param0, param1, param2),
op::Shape("f32[2]")));
}
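// A fully manually sharded while loop is left untouched; the partitioner
// inserts no collectives and keeps the original shapes.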
TEST_P(SpmdPartitioningTest, WhileManual) {
absl::string_view hlo_string = R"(
HloModule module
LoopCond {
x = s32[] parameter(0), sharding={manual}
const = s32[] constant(5), sharding={manual}
ROOT lt = pred[] compare(x, const), direction=LT, sharding={manual}
}
Inc {
x = s32[] parameter(0), sharding={manual}
const = s32[] constant(1), sharding={manual}
ROOT add = s32[] add(x, const), sharding={manual}
}
ENTRY entry {
zero = s32[] parameter(0), sharding={manual}
ROOT while = s32[] while(zero), body=Inc, condition=LoopCond,
sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto zero = AllOf(op::Parameter(0), op::Shape("s32[]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::While(zero), op::Shape("s32[]")));
}
TEST_P(SpmdPartitioningTest, WhilePartialManual) {
absl::string_view hlo_string = R"(
HloModule module
LoopCond {
x = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
const = s32[] constant(5), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
ROOT lt = pred[] compare(x, const), direction=LT, sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
}
Inc {
x = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
const = s32[] constant(1), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
ROOT add = s32[] add(x, const), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
}
ENTRY entry {
zero = s32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
ROOT while = s32[] while(zero), body=Inc, condition=LoopCond, sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto zero = AllOf(op::Parameter(0), op::Shape("s32[]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::While(zero), op::Shape("s32[]")));
}
TEST_P(SpmdPartitioningTest, TestWhileFrontendAttributes) {
absl::string_view hlo_string = R"(
HloModule module
LoopCond {
x = s32[] parameter(0), sharding={manual}
const = s32[] constant(5), sharding={manual}
ROOT lt = pred[] compare(x, const), direction=LT, sharding={manual}
}
Inc {
x = s32[] parameter(0), sharding={manual}
const = s32[] constant(1), sharding={manual}
ROOT add = s32[] add(x, const), sharding={manual}
}
ENTRY entry {
zero = s32[] parameter(0), sharding={manual}
ROOT while = s32[] while(zero), body=Inc, condition=LoopCond,
sharding={manual}, frontend_attributes={_xla_other_attribute="xyz"}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto zero = AllOf(op::Parameter(0), op::Shape("s32[]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->frontend_attributes().map().at("_xla_other_attribute"),
"xyz");
EXPECT_THAT(root, AllOf(op::While(zero), op::Shape("s32[]")));
}
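// Select-and-scatter partitioned 8 ways along a spatial dimension; the
// stride-2 window keeps shard boundaries aligned, so no halo exchange or
// extra window padding is expected.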
TEST_P(SpmdPartitioningTest, SelectAndScatter_RetinaNet) {
absl::string_view hlo_string = R"(
HloModule module
ge {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT compare = pred[] compare(a, b), direction=GE
}
sum {
c = f32[] parameter(0)
d = f32[] parameter(1)
ROOT add = f32[] add(c, d)
}
ENTRY entry {
%param.0 = f32[32,128,384,64] parameter(0)
%param.0.copy = f32[32,128,384,64] copy(%param.0),
sharding={devices=[1,8,1,1]<=[8]}
%param.1 = f32[32,64,192,64] parameter(1)
%param.1.copy = f32[32,64,192,64] copy(%param.1),
sharding={devices=[1,8,1,1]<=[8]}
constant.1 = f32[] constant(0), sharding={replicated}
ROOT select-and-scatter = f32[32,128,384,64] select-and-scatter(param.0.copy,
%param.1.copy, constant.1), window={size=1x1x1x1 stride=1x2x2x1},
select=ge, scatter=sum, sharding={devices=[1,8,1,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto source = AllOf(
op::Shape("f32[32,8,192,64]"),
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())));
auto data = AllOf(
op::Shape("f32[32,16,384,64]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())));
EXPECT_THAT(root, op::SelectAndScatter(data, source, op::Constant()));
EXPECT_EQ(root->window().dimensions(0).padding_low(), 0);
EXPECT_EQ(root->window().dimensions(0).padding_high(), 0);
}
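// LHS and RHS are both sharded along the contracting dimension; each
// partition computes a partial product and an all-reduce produces the
// replicated result.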
TEST_P(SpmdPartitioningTest, TiledDot) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,64] parameter(0)
%lhs.copy = f32[128,64] copy(%lhs), sharding={devices=[1,2]0,1}
%rhs = f32[64,256] parameter(1)
%rhs.copy = f32[64,256] copy(%rhs), sharding={devices=[2,1]0,1}
ROOT %conv = f32[128,256] convolution(%lhs.copy, %rhs.copy),
dim_labels=bf_io->bf, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/false));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Constant(), op::Reshape())),
op::Shape("f32[128,32]"));
const auto rhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Constant())),
op::Shape("f32[32,256]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(lhs, rhs)),
op::Shape("f32[128,256]")));
}
TEST_P(SpmdPartitioningTest, TiledDotOutputTiled) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,64] parameter(0)
%lhs.copy = f32[128,64] copy(%lhs), sharding={devices=[1,2]0,1}
%rhs = f32[64,256] parameter(1)
%rhs.copy = f32[64,256] copy(%rhs), sharding={devices=[2,1]0,1}
ROOT %conv = f32[128,256] convolution(%lhs.copy, %rhs.copy),
dim_labels=bf_io->bf, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Constant(), op::Reshape())),
op::Shape("f32[128,32]"));
const auto rhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(), op::Reshape(), op::Constant())),
op::Shape("f32[32,256]"));
EXPECT_THAT(root, AllOf(op::DynamicSlice(
AllOf(op::AllReduce(op::Convolution(lhs, rhs)),
op::Shape("f32[128,256]")),
op::Constant(), op::Reshape()),
op::Shape("f32[128,128]")));
}
TEST_P(SpmdPartitioningTest, BatchPartitionedConvolution) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,256,256] parameter(0)
%lhs.copy = f32[128,256,256] copy(%lhs), sharding={devices=[1,2,1]0,1}
%rhs = f32[256,8,1] parameter(1)
%rhs.copy = f32[256,8,1] copy(%rhs), sharding={replicated}
ROOT %conv = f32[128,256,8] convolution(%lhs.copy, %rhs.copy),
window={size=1}, dim_labels=0bf_io0->0bf, sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[128,128,256]"));
const auto rhs = AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[256,8,1]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[128,128,8]")));
}
TEST_P(SpmdPartitioningTest, DotOutputFeaturePartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,64] parameter(0)
%lhs.copy = f32[24,64] copy(%lhs), sharding={replicated}
%rhs = f32[39296,64] parameter(1)
%rhs.copy = f32[39296,64] copy(%rhs), sharding={devices=[2,1]0,1}
ROOT %dot = f32[24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[24,64]"));
const auto rhs = AllOf(op::Copy(op::DynamicSlice(
op::Parameter(1), op::Reshape(), op::Constant())),
op::Shape("f32[19648,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[24,19648]")));
}
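// Windowed einsum (forced by a zero threshold) with two contracting
// dimensions: the LHS is resharded to line up with the RHS, and a while loop
// dynamic-slices the RHS and accumulates partial dot results each iteration.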
TEST_P(SpmdPartitioningTest, WindowedEinsumTwoContractingDimsLhsReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2048,2,3264]{2,1,0} parameter(0), sharding={devices=[1,1,2]0,1}
%p1 = f32[2,3264,2176]{2,1,0} parameter(1), sharding={devices=[2,1,1]0,1}
ROOT %dot.224 = f32[2048,2176]{1,0} dot(f32[2048,2,3264]{2,1,0} %p0, f32[2,3264,2176]{2,1,0} %p1), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/false,
                           /*threshold_for_windowed_einsum_mib=*/0));
VLOG(1) << module->ToString();
const auto arg0 = AllOf(
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(0))))),
op::Shape("f32[2048,1,3264]"));
const auto arg1 = AllOf(op::Parameter(1), op::Shape("f32[1,3264,2176]"));
const auto while_op =
AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[2048,1,3264]{2,1,0}, f32[1,3264,2176]{2,1,0},"
" f32[2048,1088]{1,0}, f32[2048,1088]{1,0}, u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::GetTupleElement(while_op), op::Shape("f32[2048,1088]")));
const auto while_loop = root->operand(0);
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
op::Shape("f32[2048,1,3264]"));
auto rhs = AllOf(op::DynamicSlice(), op::Shape("f32[1,3264,1088]"));
auto dot_op = op::Dot(lhs, rhs);
auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
auto cond_op =
op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), cond_op,
op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumTwoContractingDimsRhsReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[4096,2,3264]{2,1,0} parameter(0), sharding={devices=[1,1,2]0,1}
%p1 = f32[2,3264,2176]{2,1,0} parameter(1), sharding={devices=[2,1,1]0,1}
ROOT %dot.224 = f32[4096,2176]{1,0} dot(f32[4096,2,3264]{2,1,0} %p0, f32[2,3264,2176]{2,1,0} %p1), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/false,
                           /*threshold_for_windowed_einsum_mib=*/0));
VLOG(1) << module->ToString();
const auto arg0 = AllOf(op::Parameter(0), op::Shape("f32[4096,2,1632]"));
const auto arg1 = AllOf(
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(1))))),
op::Shape("f32[2,1632,2176]"));
const auto while_op =
AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[4096,2,1632]{2,1,0}, f32[2,1632,2176]{2,1,0},"
" f32[4096,1088]{1,0}, f32[4096,1088]{1,0}, u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::GetTupleElement(while_op), op::Shape("f32[4096,1088]")));
const auto while_loop = root->operand(0);
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
op::Shape("f32[4096,2,1632]"));
auto rhs = AllOf(op::DynamicSlice(), op::Shape("f32[2,1632,1088]"));
auto dot_op = op::Dot(lhs, rhs);
auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
auto cond_op =
op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), cond_op,
op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, ChooseWindowedEinsumOverIncreasedMemUsageOption) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = bf16[512,4,512]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]}
%p1 = bf16[512,4,512]{2,1,0} parameter(1), sharding={devices=[16,1,4]<=[64]}
%multiply.611 = bf16[512,4,512]{2,1,0} multiply(bf16[512,4,512]{2,1,0} %p0, bf16[512,4,512]{2,1,0} %p1), sharding={devices=[16,1,4]<=[64]}
%p2 = bf16[1,2048,768]{2,1,0} parameter(2), sharding={devices=[1,4,16]<=[16,4]T(1,0)}
%reshape.1074 = bf16[4,512,768]{2,1,0} reshape(bf16[1,2048,768]{2,1,0} %p2), sharding={devices=[4,1,16]<=[16,4]T(1,0)}
ROOT %dot.128 = bf16[512,768]{1,0} dot(bf16[512,4,512]{2,1,0} %multiply.611, bf16[4,512,768]{2,1,0} %reshape.1074), lhs_contracting_dims={1,2}, rhs_contracting_dims={0,1}, sharding={devices=[16,4]<=[64]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/64,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/true,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/false,
                           /*threshold_for_windowed_einsum_mib=*/0));
VLOG(1) << module->ToString();
const auto arg0 = AllOf(op::Reshape(), op::Shape("bf16[32,1,512]{2,1,0}"));
const auto arg1 = AllOf(op::AllReduce(), op::Shape("bf16[1,512,768]{2,1,0}"));
const auto while_op =
AllOf(op::While(op::Tuple(arg0, arg1, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(bf16[32,1,512]{2,1,0}, bf16[1,512,768]{2,1,0},"
" bf16[32,192]{1,0}, bf16[32,192]{1,0}, u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(while_op),
op::Shape("bf16[32,192]{1,0}")));
const auto while_loop = root->operand(0);
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto lhs = AllOf(op::GetTupleElement(op::Parameter(0)),
op::Shape("bf16[32,1,512]{2,1,0}"));
auto rhs = AllOf(op::DynamicSlice(), op::Shape("bf16[1,512,192]{2,1,0}"));
auto dot_op = op::Dot(lhs, rhs);
auto add_op = op::Add(op::GetTupleElement(op::Parameter(0)), dot_op);
auto cond_op =
op::Conditional(op::Compare(next_i, op::Constant()), add_op, add_op);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), cond_op,
op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumKeepBatchDimensionsSorted) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
p0 = bf16[64,1025,4096]{2,1,0} parameter(0), sharding={devices=[8,1,1,8]<=[64] last_tile_dim_replicate}
p1 = bf16[1,4096,16384]{2,1,0} parameter(1), sharding={devices=[1,8,8]<=[64]}
reshape.9434 = bf16[64,1025,32,128]{3,2,1,0} reshape(p0), sharding={devices=[8,1,1,1,8]<=[64] last_tile_dim_replicate}
reshape.9438 = bf16[32,128,16384]{2,1,0} reshape(p1), sharding={devices=[8,1,8]<=[64]}
ROOT dot.1104 = bf16[32,64,1025,16384]{3,2,1,0} dot(reshape.9434, reshape.9438), lhs_batch_dims={2}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, sharding={devices=[1,8,1,8]<=[64]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/64,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/true,
                           /*unroll_windowed_einsum=*/true,
                           /*bidirectional_windowed_einsum=*/true,
                           /*threshold_for_windowed_einsum_mib=*/0));
VLOG(1) << module->ToString();
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/false)
                   .Run(module.get())
                   .status());
const HloInstruction* while_inst =
module->entry_computation()->root_instruction()->operand(0);
for (HloInstruction* inst : while_inst->while_body()->instructions()) {
if (inst->opcode() == HloOpcode::kDot) {
auto lhs_batch_dims =
inst->dot_dimension_numbers().lhs_batch_dimensions();
CHECK_EQ(lhs_batch_dims.size(), 2);
CHECK_EQ(lhs_batch_dims[0], 2);
CHECK_EQ(lhs_batch_dims[1], 3);
auto rhs_batch_dims =
inst->dot_dimension_numbers().rhs_batch_dimensions();
CHECK_EQ(rhs_batch_dims.size(), 2);
CHECK_EQ(rhs_batch_dims[0], 0);
CHECK_EQ(rhs_batch_dims[1], 1);
}
}
}
TEST_P(SpmdPartitioningTest, DotPartialDeviceOrder) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,256,4096] parameter(0), sharding={devices=[1,1,2,2]1,3,0,2 last_tile_dim_replicate}
%rhs = f32[4096,2048] parameter(1), sharding={devices=[2,2]3,1,2,0}
ROOT %dot = f32[16,256,2048] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[1,1,2,2]2,3,0,1 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Parameter(0), op::Shape("f32[16,256,2048]"));
const auto rhs = AllOf(op::Parameter(1), op::Shape("f32[2048,1024]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Dot(lhs, rhs)),
op::Shape("f32[16,256,1024]")));
}
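// Both operands and the output are sharded along the batch dimension, so the
// dot is computed locally on each partition.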
TEST_P(SpmdPartitioningTest, EinsumBatchPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs), sharding={devices=[2,1,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,24,64]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,39296,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[16,24,39296]")));
}
TEST_P(SpmdPartitioningTest, EinsumLHSandOutputBatchPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,24,64]"));
const auto rhs =
AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs, op::DynamicSlice(rhs, op::Reshape(),
op::Constant(),
op::Constant())),
op::Shape("f32[16,24,39296]")));
}
TEST_P(SpmdPartitioningTest, EinsumRHSandOutputBatchPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[1,2,1]0,1}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs), sharding={devices=[2,1,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,12,64]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,39296,64]"));
const auto lhs_reshard =
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))));
EXPECT_THAT(root,
AllOf(op::Dot(lhs_reshard, rhs), op::Shape("f32[16,24,39296]")));
}
TEST_P(SpmdPartitioningTest, EinsumOutputBatchPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs), sharding={replicated}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs_slice =
AllOf(op::DynamicSlice(op::Copy(op::Parameter(0)), op::Reshape(),
op::Constant(), op::Constant()),
op::Shape("f32[16,24,64]"));
const auto rhs_slice =
AllOf(op::DynamicSlice(op::Copy(op::Parameter(1)), op::Reshape(),
op::Constant(), op::Constant()),
op::Shape("f32[16,39296,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs_slice, rhs_slice),
op::Shape("f32[16,24,39296]")));
}
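// Both contracting dimensions are sharded on a 2x2 mesh, so the local dot is
// followed by an all-reduce over each of the two contracting mesh
// dimensions.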
TEST_P(SpmdPartitioningTest, EinsumContractingDimsPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,1,2,2]<=[4]}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,1,2,2]<=[4]}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Constant(), op::Reshape(), op::Reshape())),
op::Shape("f32[32,24,32,64]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
op::Constant(), op::Reshape(), op::Reshape())),
op::Shape("f32[32,39296,32,64]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::AllReduce(op::Dot(lhs, rhs))),
op::Shape("f32[32,24,39296]")));
}
TEST_P(SpmdPartitioningTest,
EinsumContractingDimsPartitionedResultPartiallySliced) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,64] parameter(0), sharding={devices=[1,4]<=[4]}
%rhs = f32[64,128] parameter(1), sharding={devices=[4,1]<=[4]}
ROOT %dot = f32[32,128] dot(%lhs, %rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0},
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Parameter(0), op::Shape("f32[32,16]"));
const auto rhs = AllOf(op::Parameter(1), op::Shape("f32[16,128]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicSlice(
op::AllReduce(op::Dot(lhs, rhs)), _, _)),
op::Shape("f32[16,128]")));
}
TEST_P(SpmdPartitioningTest, EinsumLHSNonContractingDimsPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs), sharding={replicated}
ROOT %dot = f32[32,24,128,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[1,2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[32,12,64,64]"));
const auto rhs =
AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[32,12,64,39296]")));
}
TEST_P(SpmdPartitioningTest, EinsumRHSNonContractingDimsPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs), sharding={replicated}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]<=[4]}
ROOT %dot = f32[32,24,39296,128] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[1,1,2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[32,19648,64,64]"));
EXPECT_THAT(root, AllOf(op::Dot(lhs, rhs), op::Shape("f32[32,24,19648,64]")));
}
TEST_P(SpmdPartitioningTest, EinsumOutputLHSNonContractingDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={replicated}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={replicated}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64,128]"));
const auto rhs =
AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64,128]"));
EXPECT_THAT(
root,
AllOf(op::Dot(AllOf(op::DynamicSlice(lhs, op::Constant(), op::Reshape(),
op::Constant(), op::Constant()),
op::Shape("f32[32,12,64,128]")),
rhs),
op::Shape("f32[32,12,39296]")));
}
TEST_P(SpmdPartitioningTest, EinsumOutputRHSNonContractingDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={replicated}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={replicated}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::Parameter(0)), op::Shape("f32[32,24,64,128]"));
const auto rhs =
AllOf(op::Copy(op::Parameter(1)), op::Shape("f32[32,39296,64,128]"));
EXPECT_THAT(root,
AllOf(op::Dot(lhs, AllOf(op::DynamicSlice(
rhs, op::Constant(), op::Reshape(),
op::Constant(), op::Constant()),
op::Shape("f32[32,19648,64,128]"))),
op::Shape("f32[32,24,19648]")));
}
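// Windowed einsum where the RHS stays in place: each loop iteration
// dynamic-slices the LHS along the output non-contracting dimension,
// accumulates the partial dot, and conditionally collective-permutes the
// accumulator between iterations.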
TEST_P(SpmdPartitioningTest,
EinsumRHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[320,25,64,128] parameter(0)
%lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[320,39296,64,128] parameter(1)
%rhs.copy = f32[320,39296,64,128] copy(%rhs),
sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,25,16,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,39296,16,128]"));
EXPECT_THAT(
root,
AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
op::Shape("f32[320,7,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto ds =
AllOf(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
op::Shape("f32[320,7,16,128]"));
auto partial_output =
AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(ds, op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[320,7,39296]"));
auto window = op::Conditional(op::Compare(next_i, op::Constant()),
partial_output, partial_output);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), window,
op::GetTupleElement(op::Parameter(0)), next_i));
auto cp_conditional =
while_loop->while_body()->root_instruction()->operand(2);
EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
op::CollectivePermute(op::Parameter(0)));
EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
op::Parameter(0));
}
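
// Unrolled variant of the test above: each iteration produces two partial
// results and the final output adds a collective-permuted copy to the local
// one (the extra boolean arguments enable the unrolled windowed-einsum path).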
TEST_P(SpmdPartitioningTest,
UnrolledEinsumRHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[320,25,64,128] parameter(0)
%lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[320,39296,64,128] parameter(1)
%rhs.copy = f32[320,39296,64,128] copy(%rhs),
sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, true));
VLOG(1) << module->ToString();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,25,16,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,39296,16,128]"));
const auto while_op = AllOf(
op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[320,25,16,128], f32[320,39296,16,128], f32[320,7,39296],"
" f32[320,7,39296], u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Add(op::CollectivePermute(op::GetTupleElement(while_op)),
op::GetTupleElement(while_op)),
op::Shape("f32[320,7,39296]")));
const auto while_loop = root->operand(1)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto ds =
AllOf(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Reshape(), op::Constant(), op::Constant()),
op::Shape("f32[320,7,16,128]"));
auto partial_output = AllOf(
op::Add(op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
op::Dot(ds, op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[320,7,39296]"));
auto partial_output2 =
AllOf(op::CollectivePermute(
op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(ds, op::GetTupleElement(op::Parameter(0))))),
op::Shape("f32[320,7,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), partial_output,
partial_output2, next_i));
}
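
// Bidirectional variant: slices travel in both directions via nested
// collective-permutes, and each iteration combines two partial dot results.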
TEST_P(
SpmdPartitioningTest,
BidirectionalEinsumRHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[320,25,64,128] parameter(0)
%lhs.copy = f32[320,25,64,128] copy(%lhs), sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[320,39296,64,128] parameter(1)
%rhs.copy = f32[320,39296,64,128] copy(%rhs),
sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[320,25,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,25,16,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Constant(),
op::Constant(), op::Reshape(), op::Constant())),
op::Shape("f32[320,39296,16,128]"));
const auto while_op = AllOf(
op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[320,25,16,128], f32[320,39296,16,128], f32[320,7,39296],"
" f32[320,7,39296], u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Add(op::GetTupleElement(while_op),
op::CollectivePermute(op::GetTupleElement(while_op))),
op::Shape("f32[320,7,39296]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
const auto partial_dot_pattern =
AllOf(op::Reshape(op::Slice(
op::Dot(op::Maximum(), op::GetTupleElement(op::Parameter(0))))),
op::Shape("f32[320,7,39296]"));
const auto partial_output_pattern = AllOf(
op::Add(op::CollectivePermute(op::Add(
op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
partial_dot_pattern)),
partial_dot_pattern),
op::Shape("f32[320,7,39296]"));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), partial_output_pattern,
partial_output_pattern, next_i));
}
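
// The LHS comes from a broadcast/add rather than a parameter; the test only
// checks that partitioning succeeds.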
TEST_P(SpmdPartitioningTest,
EinsumRHSWindowedInContractingOutNonContractingFromBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant.1 = f32[] constant(2)
%broadcast = f32[32,25,64,128] broadcast(%constant.1), dimensions={},
sharding={devices=[1,1,4,1]<=[4]}
%add = f32[32,25,64,128] add(%broadcast, %broadcast),
sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[32,39296,64,128] parameter(0)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[32,25,39296] dot(%add, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
}
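
// Same windowed-einsum lowering as the RHS cases above, but with the LHS
// operand windowed along its contracting dimension.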
TEST_P(SpmdPartitioningTest,
EinsumLHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,1024,16384] parameter(0)
%lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[16384,67,128] parameter(1)
%rhs.copy = f32[16384,67,128] copy(%rhs),
sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,1,4,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[8,1024,4096]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4096,67,128]"));
EXPECT_THAT(
root,
AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
op::Shape("f32[8,1024,17,128]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto ds =
AllOf(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Reshape(), op::Constant()),
op::Shape("f32[4096,17,128]"));
auto partial_output =
AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
op::Shape("f32[8,1024,17,128]"));
auto window = op::Conditional(op::Compare(next_i, op::Constant()),
partial_output, partial_output);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), window,
op::GetTupleElement(op::Parameter(0)), next_i));
auto cp_conditional =
while_loop->while_body()->root_instruction()->operand(2);
EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
op::CollectivePermute(op::Parameter(0)));
EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
op::Parameter(0));
}
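
// Unrolled variant of the LHS-windowed case above.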
TEST_P(SpmdPartitioningTest,
UnrollEinsumLHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,1024,16384] parameter(0)
%lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[16384,67,128] parameter(1)
%rhs.copy = f32[16384,67,128] copy(%rhs),
sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 8, true, false, true));
VLOG(1) << module->ToString();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[8,1024,4096]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4096,67,128]"));
const auto while_op =
AllOf(op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[8,1024,4096], f32[4096,67,128], f32[8,1024,17,128],"
" f32[8,1024,17,128], u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Add(op::CollectivePermute(op::GetTupleElement(while_op)),
op::GetTupleElement(while_op)),
op::Shape("f32[8,1024,17,128]")));
const auto while_loop = root->operand(1)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto ds =
AllOf(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Reshape(), op::Constant()),
op::Shape("f32[4096,17,128]"));
auto partial_output = AllOf(
op::Add(op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
op::Shape("f32[8,1024,17,128]"));
auto partial_output2 =
AllOf(op::CollectivePermute(
op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)), ds))),
op::Shape("f32[8,1024,17,128]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), partial_output,
partial_output2, next_i));
}
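
// Bidirectional variant of the LHS-windowed case above.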
TEST_P(
SpmdPartitioningTest,
BidirectionalEinsumLHSWindowedInContractingOutNonContractingPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,1024,16384] parameter(0)
%lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[16384,67,128] parameter(1)
%rhs.copy = f32[16384,67,128] copy(%rhs),
sharding={devices=[4,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
ROOT %dot = f32[16,1024,67,128] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,1,4,1]<=[8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 8, true, false, false, true));
VLOG(1) << module->ToString();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[8,1024,4096]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[4096,67,128]"));
const auto while_op =
AllOf(op::While(op::Tuple(lhs, rhs, op::Broadcast(), op::Broadcast(),
op::Constant())),
op::Shape("(f32[8,1024,4096], f32[4096,67,128], f32[8,1024,17,128],"
" f32[8,1024,17,128], u32[])"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Add(op::GetTupleElement(while_op),
op::CollectivePermute(op::GetTupleElement(while_op))),
op::Shape("f32[8,1024,17,128]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
const auto partial_dot_pattern =
AllOf(op::Reshape(op::Slice(
op::Dot(op::GetTupleElement(op::Parameter(0)), op::Maximum()))),
op::Shape("f32[8,1024,17,128]"));
const auto partial_output_pattern = AllOf(
op::Add(op::CollectivePermute(op::Add(
op::CollectivePermute(op::GetTupleElement(op::Parameter(0))),
partial_dot_pattern)),
partial_dot_pattern),
op::Shape("f32[8,1024,17,128]"));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), partial_output_pattern,
partial_output_pattern, next_i));
}
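
// Like the LHS-windowed case above, but the RHS has two non-contracting
// dimensions, both of which end up partitioned in the output.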
TEST_P(SpmdPartitioningTest,
EinsumLHSWindowedInContractingOutNonContractingPartitioned2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,1024,16384] parameter(0)
%lhs.copy = f32[16,1024,16384] copy(%lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[16384,2,33,128] parameter(1)
%rhs.copy = f32[16384,2,33,128] copy(%rhs),
sharding={devices=[4,1,1,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
ROOT %dot = f32[16,1024,2,33,128] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,1,2,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[8,1024,4096]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[4096,2,33,128]"));
EXPECT_THAT(
root,
AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant()))),
op::Shape("f32[8,1024,1,17,128]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto ds =
AllOf(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Reshape(), op::Reshape(), op::Constant()),
op::Shape("f32[4096,1,17,128]"));
auto partial_output =
AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)), ds)),
op::Shape("f32[8,1024,1,17,128]"));
auto window = op::Conditional(op::Compare(next_i, op::Constant()),
partial_output, partial_output);
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)), window,
op::GetTupleElement(op::Parameter(0)), next_i));
auto cp_conditional =
while_loop->while_body()->root_instruction()->operand(2);
EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
op::CollectivePermute(op::Parameter(0)));
EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
op::Parameter(0));
}
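
// Two dots share the same windowed RHS; the shared RHS is expected to be
// all-gathered only once (no double all-gather), so both tuple elements match
// the same pattern.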
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingNoDoubleAG) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%lhs2 = f32[32,24,64,128] parameter(2)
%lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto tuple_element = op::AllReduce(op::DynamicUpdateSlice(
_, op::Dot(_, op::AllReduce(op::DynamicUpdateSlice())), _, _, _));
EXPECT_THAT(root, op::Tuple(tuple_element, tuple_element));
}
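
// The two dots cannot share a sharding for the common RHS, so only the first
// is lowered to a windowed-einsum while loop; the second stays a plain dot on
// a slice.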
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingNoSharedSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%lhs2 = f32[32,24,64,128] parameter(2)
%lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,2,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[2,1,1]0,1}
ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
_, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
op::AllReduce(op::DynamicUpdateSlice(
_, op::Dot(_, op::Slice(_)), _, _, _))));
}
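
// Unrolled variant of the test above; additionally checks the unrolled loop
// body (two dynamic-update-slices per iteration).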
TEST_P(SpmdPartitioningTest,
UnrollEinsumRHSWindowedNonContractingNoSharedSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%lhs2 = f32[32,24,64,128] parameter(2)
%lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,2,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[2,1,1]0,1}
ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2, true, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
_, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
op::AllReduce(op::DynamicUpdateSlice(
_, op::Dot(_, op::Slice(_)), _, _, _))));
const auto while_loop =
root->operand(0)->operand(0)->operand(1)->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto intermediate_output = AllOf(
op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,12,39296]"));
auto output = AllOf(
op::DynamicUpdateSlice(
intermediate_output,
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::GetTupleElement(op::Parameter(0)))),
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,12,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output, op::GetTupleElement(op::Parameter(0)), next_i));
}
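
// Bidirectional variant of the test above; the loop body stitches four
// partial dot slices into the output per iteration.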
TEST_P(SpmdPartitioningTest,
BidirectionalEinsumRHSWindowedNonContractingNoSharedSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%lhs2 = f32[32,24,64,128] parameter(2)
%lhs2.copy = f32[32,24,64,128] copy(%lhs2), sharding={devices=[1,1,4,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
%dot2 = f32[32,24,39295] dot(%lhs2.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[4,1,1]<=[4]}
ROOT %t = tuple(%dot, %dot2)
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
_, op::Slice(op::GetTupleElement(op::While(_))), _, _, _)),
op::AllReduce(op::DynamicUpdateSlice(
_, op::Dot(_, op::Slice(_)), _, _, _))));
const auto while_loop =
root->operand(0)->operand(0)->operand(1)->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
const auto partial_dot_pattern =
AllOf(op::Reshape(op::Slice(op::Dot(op::GetTupleElement(op::Parameter(0)),
op::Concatenate()))),
op::Shape("f32[32,6,9824]"));
auto intermediate_output1 =
AllOf(op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
partial_dot_pattern, op::Constant(),
op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto intermediate_output2 = AllOf(
op::DynamicUpdateSlice(intermediate_output1, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto intermediate_output3 = AllOf(
op::DynamicUpdateSlice(intermediate_output2, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto partial_output = AllOf(
op::DynamicUpdateSlice(intermediate_output3, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
partial_output,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
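
// The RHS non-contracting dimension (39295) is not divisible by the partition
// count, so the RHS is padded before windowing and the final result is sliced
// back to the original size.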
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
EXPECT_THAT(root,
AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(),
op::Broadcast(), op::Constant()))),
op::Shape("f32[32,12,39296]"))),
op::Shape("f32[32,12,39295]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto window = op::Conditional(op::Compare(next_i, op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)));
auto partial_output = op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)), window,
op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
partial_output, op::Constant(),
op::Constant(), op::Reshape()),
op::GetTupleElement(op::Parameter(0)), next_i));
auto cp_conditional =
while_loop->while_body()->root_instruction()->operand(1);
EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
op::CollectivePermute(op::Parameter(0)));
EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
op::Parameter(0));
}
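
// Unrolled variant of the padded non-contracting case above.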
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2, true, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
EXPECT_THAT(root,
AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(),
op::Broadcast(), op::Constant()))),
op::Shape("f32[32,12,39296]"))),
op::Shape("f32[32,12,39295]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto intermediate_output = AllOf(
op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,12,39296]"));
auto output = AllOf(
op::DynamicUpdateSlice(
intermediate_output,
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::GetTupleElement(op::Parameter(0)))),
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,12,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output, op::GetTupleElement(op::Parameter(0)), next_i));
}
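
// Bidirectional variant of the padded non-contracting case above.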
TEST_P(SpmdPartitioningTest, BidirectionalEinsumRHSWindowedNonContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
ROOT %dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,6,64,128]"));
const auto rhs =
AllOf(op::Reshape(op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant()))),
op::Shape("f32[32,1,9824,64,128]"));
EXPECT_THAT(
root,
AllOf(op::Slice(AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(),
op::CollectivePermute(rhs), op::Constant()))),
op::Shape("f32[32,6,39296]"))),
op::Shape("f32[32,6,39295]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
const auto partial_dot_pattern =
AllOf(op::Reshape(op::Slice(op::Dot(op::GetTupleElement(op::Parameter(0)),
op::Concatenate()))),
op::Shape("f32[32,6,9824]"));
auto intermediate_output1 =
AllOf(op::DynamicUpdateSlice(op::GetTupleElement(op::Parameter(0)),
partial_dot_pattern, op::Constant(),
op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto intermediate_output2 = AllOf(
op::DynamicUpdateSlice(intermediate_output1, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto intermediate_output3 = AllOf(
op::DynamicUpdateSlice(intermediate_output2, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
auto partial_output = AllOf(
op::DynamicUpdateSlice(intermediate_output3, partial_dot_pattern,
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("f32[32,6,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
partial_output,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
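
// The shared contracting dimension (63) is padded to a multiple of the
// partition count, so the windowed RHS shard is masked with a select before
// entering the loop.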
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,63,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,39296,32,128]"));
auto masked_rhs =
op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
op::Tuple(lhs, masked_rhs, op::Broadcast(),
op::Broadcast(), op::Constant()))),
op::Shape("f32[32,12,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto window = op::Conditional(op::Compare(next_i, op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0)));
auto partial_output = op::Dot(
op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
op::GetTupleElement(op::Parameter(0)));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)), window,
op::Add(op::GetTupleElement(op::Parameter(0)), partial_output),
op::GetTupleElement(op::Parameter(0)), next_i));
auto cp_conditional =
while_loop->while_body()->root_instruction()->operand(1);
EXPECT_THAT(cp_conditional->true_computation()->root_instruction(),
op::CollectivePermute(op::Parameter(0)));
EXPECT_THAT(cp_conditional->false_computation()->root_instruction(),
op::Parameter(0));
}
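
// Unrolled variant of the masked contracting case above.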
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2, true, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,63,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,39296,32,128]"));
auto masked_rhs =
op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
op::Tuple(lhs, masked_rhs, op::Broadcast(),
op::Broadcast(), op::Constant()))),
op::Shape("f32[32,12,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto intermediate_output =
AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::DynamicSlice(
op::Pad(op::GetTupleElement(op::Parameter(0)),
op::Constant()),
op::Constant(), op::Constant(), op::Reshape(),
op::Constant()),
op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[32,12,39296]"));
auto output = AllOf(
op::Add(
intermediate_output,
op::Dot(
op::DynamicSlice(op::Pad(op::GetTupleElement(op::Parameter(0)),
op::Constant()),
op::Constant(), op::Constant(), op::Reshape(),
op::Constant()),
op::CollectivePermute(op::GetTupleElement(op::Parameter(0))))),
op::Shape("f32[32,12,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output, op::GetTupleElement(op::Parameter(0)), next_i));
}
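
// Bidirectional variant of the masked contracting case above.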
TEST_P(SpmdPartitioningTest, BidirectionalEinsumRHSWindowedContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,63,128] parameter(0)
%lhs.copy = f32[32,24,63,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39296,63,128] parameter(1)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,4,1]<=[4]}
ROOT %dot = f32[32,24,39296] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,6,63,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,39296,16,128]"));
auto masked_rhs = op::Reshape(
op::Select(op::Compare(), rhs, op::Broadcast(op::Constant())));
EXPECT_THAT(root,
AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, masked_rhs, op::Broadcast(),
op::CollectivePermute(masked_rhs), op::Constant()))),
op::Shape("f32[32,6,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto partial_output =
AllOf(op::Add(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::Maximum(), op::Concatenate())),
op::Dot(op::Maximum(), op::Concatenate())),
op::Shape("f32[32,6,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
partial_output,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
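
// The dot's users (multiply, reduces) depend on nodes outside the dot chain,
// so they must not be moved into the windowed-einsum loop; the multiply and
// reductions are expected to remain after the while.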
TEST_P(SpmdPartitioningTest,
EinsumWindowedNonContractingDimensionsNoCodeMotionWithDependentNodes) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%constant.2 = f32[] constant(4)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
%reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
%all-reduce = f32[32,24] all-reduce(%reduce),
to_apply=sum, sharding={devices=[1,2]0,1}
%broadcast.1 = f32[32,24,39295] broadcast(%all-reduce), dimensions={0,1},
sharding={devices=[1,2,1]0,1}
%subtract = f32[32,24,39295] subtract(%multiply, %broadcast.1),
sharding={devices=[1,2,1]0,1}
ROOT %reduce.1 = f32[32,24] reduce(%subtract, %constant.2), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
const auto while_output =
AllOf(op::Slice(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, op::Broadcast(), op::Broadcast(), op::Constant())))),
op::Shape("f32[32,12,39295]"));
const auto multiply =
AllOf(op::Multiply(while_output, op::Broadcast(op::Constant())),
op::Shape("f32[32,12,39295]"));
EXPECT_THAT(
root,
AllOf(op::Reduce(
op::Subtract(multiply, op::Broadcast(op::AllReduce(op::Reduce(
multiply, op::Constant())))),
op::Constant()),
op::Shape("f32[32,12]")));
const auto while_loop =
root->operand(0)->operand(0)->operand(0)->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto output = op::DynamicUpdateSlice(
op::GetTupleElement(op::Parameter(0)),
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant(), op::Reshape(op::DynamicSlice()));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::Conditional(op::Compare(next_i, op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
output, op::GetTupleElement(op::Parameter(0)), next_i));
}
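
// A reduce over the windowed non-contracting dimension can be sunk into the
// loop: the loop state carries a partially reduced accumulator instead of the
// full dot output.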
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingReduce1) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
EXPECT_THAT(
root,
AllOf(op::GetTupleElement(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, input_subtuple, op::Broadcast(), op::Constant())))),
op::Shape("f32[32,12]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant());
auto output_tuple = op::Tuple(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Add(op::Reduce(
op::Select(op::Compare(),
op::Multiply(
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::DynamicSlice()),
op::Broadcast()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::DynamicSlice(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant())));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::Conditional(op::Compare(next_i, op::Constant()),
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
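
// Unrolled variant of the sunken-reduce case above.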
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContractingReduce1) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,2]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2, true, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
EXPECT_THAT(
root,
AllOf(op::GetTupleElement(op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, input_subtuple, op::Broadcast(), op::Constant())))),
op::Shape("f32[32,12]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto intermediate_output = AllOf(
op::Add(
op::Reduce(
op::Select(op::Compare(),
op::Multiply(
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::GetTupleElement(
op::Parameter(0)))),
op::DynamicSlice()),
op::Broadcast()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::DynamicSlice(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant())),
op::Shape("f32[32,12]"));
auto output_tuple = op::Tuple(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Add(op::Reduce(
op::Select(op::Compare(),
op::Multiply(
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::DynamicSlice()),
op::Broadcast()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::DynamicSlice(intermediate_output, op::Constant(),
op::Constant())));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
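
// Bidirectional variant of the sunken-reduce case above.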
TEST_P(SpmdPartitioningTest,
BidirectionalEinsumRHSWindowedNonContractingReduce1) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,4,1]<=[4]}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,4,1]<=[4]}
ROOT %reduce = f32[32,24] reduce(%multiply, %constant), dimensions={2},
to_apply=sum, sharding={devices=[1,4]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,6,64,128]"));
const auto rhs =
AllOf(op::Reshape(op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant()))),
op::Shape("f32[32,1,9824,64,128]"));
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
EXPECT_THAT(root,
AllOf(op::GetTupleElement(op::GetTupleElement(op::While(
op::Tuple(lhs, rhs, input_subtuple,
op::CollectivePermute(), op::Constant())))),
op::Shape("f32[32,6]")));
const auto while_loop = root->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto partial_reduce_pattern = AllOf(
op::Reduce(
op::Select(op::Compare(),
op::Multiply(op::Reshape(op::Slice(op::Dot(
op::GetTupleElement(op::Parameter(0)),
op::Concatenate()))),
op::DynamicSlice()),
op::Broadcast()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[32,6]"));
auto intermediate_output1 = AllOf(
op::Add(partial_reduce_pattern,
op::DynamicSlice(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Constant(), op::Constant())),
op::Shape("f32[32,6]"));
auto intermediate_output2 =
AllOf(op::Add(partial_reduce_pattern,
op::DynamicSlice(intermediate_output1, op::Constant(),
op::Constant())),
op::Shape("f32[32,6]"));
auto intermediate_output3 =
AllOf(op::Add(partial_reduce_pattern,
op::DynamicSlice(intermediate_output2, op::Constant(),
op::Constant())),
op::Shape("f32[32,6]"));
auto output_tuple =
op::Tuple(op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Add(partial_reduce_pattern,
op::DynamicSlice(intermediate_output3, op::Constant(),
op::Constant())));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output_tuple,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
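
// Here the reduce is over the LHS non-contracting dimension and the result is
// replicated; the test only checks that partitioning succeeds.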
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedNonContractingReduce2) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
}
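
// Unrolled variant of the previous case; the reduction is accumulated into a
// dynamic-update-slice inside the loop and all-reduced afterwards.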
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedNonContractingReduce2) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,1]0,1}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,2,1,1]0,1}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1]0,1}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,2,1]0,1}
ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 2, true, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,12,64,128]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(1), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,19648,64,128]"));
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::Slice(op::GetTupleElement(op::GetTupleElement(
op::While(op::Tuple(lhs, rhs, input_subtuple, op::Broadcast(),
op::Constant())))))),
op::Shape("f32[32,39295]")));
const auto while_loop = root->operand(0)->operand(0)->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto intermediate_output = AllOf(
op::DynamicUpdateSlice(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::Reduce(
op::Multiply(op::Dot(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
op::DynamicSlice()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::Constant(), op::Reshape()),
op::Shape("f32[32,39296]"));
auto output_tuple = op::Tuple(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::DynamicUpdateSlice(
intermediate_output,
op::Reduce(
op::Multiply(op::Dot(op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))),
op::DynamicSlice()),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::Constant(), op::Reshape()));
EXPECT_THAT(
while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output_tuple, op::GetTupleElement(op::Parameter(0)), next_i));
}
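
// Bidirectional variant of the case above.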
TEST_P(SpmdPartitioningTest,
BidirectionalEinsumRHSWindowedNonContractingReduce2) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,4,1,1]<=[4]}
%rhs = f32[32,39295,64,128] parameter(1)
%rhs.copy = f32[32,39295,64,128] copy(%rhs), sharding={devices=[1,4,1,1]<=[4]}
%dot = f32[32,24,39295] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
%constant = f32[] constant(0)
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,39295] broadcast(%constant.1), dimensions={},
sharding={devices=[1,4,1]<=[4]}
%multiply = f32[32,24,39295] multiply(%dot, %broadcast),
sharding={devices=[1,4,1]<=[4]}
ROOT %reduce = f32[32,39295] reduce(%multiply, %constant), dimensions={1},
to_apply=sum, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, 4, true, false, false, true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[32,6,64,128]"));
const auto rhs =
AllOf(op::Reshape(op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(1), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Constant()))),
op::Shape("f32[32,1,9824,64,128]"));
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
EXPECT_THAT(
root, AllOf(op::AllReduce(op::Slice(op::GetTupleElement(
op::GetTupleElement(op::While(op::Tuple(
lhs, rhs, input_subtuple, op::CollectivePermute(rhs),
op::Constant())))))),
op::Shape("f32[32,39295]")));
const auto while_loop = root->operand(0)->operand(0)->operand(0)->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto partial_reduce_pattern = AllOf(
op::Reduce(op::Multiply(op::Reshape(op::Slice(
op::Dot(op::GetTupleElement(op::Parameter(0)),
op::Concatenate()))),
op::DynamicSlice(op::Broadcast(), op::Constant(),
op::Constant(), op::Reshape())),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[32,9824]"));
auto intermediate_output1 =
AllOf(op::DynamicUpdateSlice(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
partial_reduce_pattern, op::Constant(), op::Reshape()),
op::Shape("f32[32,39296]"));
auto intermediate_output2 =
AllOf(op::DynamicUpdateSlice(intermediate_output1, partial_reduce_pattern,
op::Constant(), op::Reshape()),
op::Shape("f32[32,39296]"));
auto intermediate_output3 =
AllOf(op::DynamicUpdateSlice(intermediate_output2, partial_reduce_pattern,
op::Constant(), op::Reshape()),
op::Shape("f32[32,39296]"));
auto output_tuple = op::Tuple(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))),
op::DynamicUpdateSlice(intermediate_output3, partial_reduce_pattern,
op::Constant(), op::Reshape()));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output_tuple,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
TEST_P(SpmdPartitioningTest, EinsumRHSWindowedContractingFromBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%rhs = f32[32,39296,63,128] parameter(0)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1,1]0,1}
%add = f32[32,24,63,128] add(%broadcast, %broadcast),
sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
2));
VLOG(1) << module->ToString();
}
TEST_P(SpmdPartitioningTest, UnrollEinsumRHSWindowedContractingFromBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%rhs = f32[32,39296,63,128] parameter(0)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,2,1]0,1}
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
sharding={devices=[1,2,1,1]0,1}
%add = f32[32,24,63,128] add(%broadcast, %broadcast),
sharding={devices=[1,2,1,1]0,1}
ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,2,1]0,1}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/2,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = op::Tuple(op::Constant());
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(0), op::Constant()),
op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,39296,32,128]"));
auto masked_rhs =
op::Select(op::Compare(), rhs, op::Broadcast(op::Constant()));
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(
op::Tuple(lhs, masked_rhs, op::Broadcast(),
op::Broadcast(), op::Constant()))),
op::Shape("f32[32,12,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto padded_broadcast_sum = op::Pad(
op::Add(op::Broadcast(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0)))),
op::Broadcast(
op::GetTupleElement(op::GetTupleElement(op::Parameter(0))))),
op::Constant());
auto intermediate_output =
AllOf(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::DynamicSlice(padded_broadcast_sum,
op::Constant(), op::Constant(),
op::Reshape(), op::Constant()),
op::GetTupleElement(op::Parameter(0)))),
op::Shape("f32[32,12,39296]"));
auto output = AllOf(
op::Add(
intermediate_output,
op::Dot(
op::DynamicSlice(padded_broadcast_sum, op::Constant(),
op::Constant(), op::Reshape(), op::Constant()),
op::CollectivePermute(op::GetTupleElement(op::Parameter(0))))),
op::Shape("f32[32,12,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output, op::GetTupleElement(op::Parameter(0)), next_i));
}
TEST_P(SpmdPartitioningTest,
BidirectionalEinsumRHSWindowedContractingFromBroadcast) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%rhs = f32[32,39296,63,128] parameter(0)
%rhs.copy = f32[32,39296,63,128] copy(%rhs), sharding={devices=[1,1,4,1]<=[4]}
%constant.1 = f32[] constant(2)
%broadcast = f32[32,24,63,128] broadcast(%constant.1), dimensions={},
sharding={devices=[1,4,1,1]<=[4]}
%add = f32[32,24,63,128] add(%broadcast, %broadcast),
sharding={devices=[1,4,1,1]<=[4]}
ROOT %dot = f32[32,24,39296] dot(%add, %rhs.copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2,3}, rhs_contracting_dims={2,3},
sharding={devices=[1,4,1]<=[4]}
})";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      PartitionComputation(hlo_string, /*num_devices=*/4,
                           /*conv_halo_exchange_always_on_lhs=*/true,
                           /*choose_faster_windowed_einsum=*/false,
                           /*unroll_windowed_einsum=*/false,
                           /*bidirectional_windowed_einsum=*/true));
VLOG(1) << module->ToString();
auto input_subtuple =
op::Tuple(op::Constant(), op::Constant(), op::Broadcast(op::Constant()));
const auto root = module->entry_computation()->root_instruction();
const auto lhs = op::Tuple(op::Constant());
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(0), op::Constant()),
op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[32,39296,16,128]"));
auto masked_rhs = op::Reshape(
op::Select(op::Compare(), rhs, op::Broadcast(op::Constant())));
EXPECT_THAT(root,
AllOf(op::GetTupleElement(op::While(op::Tuple(
lhs, masked_rhs, op::Broadcast(),
op::CollectivePermute(masked_rhs), op::Constant()))),
op::Shape("f32[32,6,39296]")));
const auto while_loop = root->operand(0);
EXPECT_THAT(
while_loop->while_condition()->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0)), op::Constant()));
const auto next_i =
op::Add(op::Add(op::GetTupleElement(op::Parameter(0)), op::Constant()),
op::Constant());
auto output =
AllOf(op::Add(op::Add(op::GetTupleElement(op::Parameter(0)),
op::Dot(op::Maximum(), op::Concatenate())),
op::Dot(op::Maximum(), op::Concatenate())),
op::Shape("f32[32,6,39296]"));
EXPECT_THAT(while_loop->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0)),
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
output,
op::CollectivePermute(op::CollectivePermute(
op::GetTupleElement(op::Parameter(0)))),
next_i));
}
TEST_P(SpmdPartitioningTest, EinsumNonContractingDimPartitionOnTwoDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[8,1024,2,1536] parameter(0)
%lhs.copy = bf16[8,1024,2,1536] copy(lhs),
sharding={devices=[4,1,2,1]<=[8]}
%rhs = bf16[2,1536,512,1] parameter(1)
%rhs.copy = bf16[2,1536,512,1] copy(rhs),
sharding={devices=[2,1,2,1,2]0,4,2,6,1,5,3,7 last_tile_dim_replicate}
ROOT %convolution = bf16[8,1024,512,1] convolution(lhs.copy, rhs.copy),
window={size=1x2}, dim_labels=0b1f_1io0->0bf1,
sharding={devices=[4,1,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
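  // The rhs shard is first re-replicated along its output-feature dimension
  // (all-reduce of a dynamic-update-slice into a broadcast buffer); the
  // convolution's partial results are then all-reduced and dynamic-sliced
  // down to the final output sharding.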
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[2,1024,1,1536]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[1,1536,256,1]"));
const auto partial_replicate_rhs =
AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), rhs, op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[1,1536,512,1]"));
EXPECT_THAT(
root,
AllOf(op::DynamicSlice(
op::AllReduce(op::Convolution(lhs, partial_replicate_rhs)),
op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
op::Shape("bf16[2,1024,256,1]")));
}
TEST_P(SpmdPartitioningTest, EinsumNonContractingDimPartitionOnTwoDims2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[8,1024,2,1536] parameter(0)
%lhs.copy = bf16[8,1024,2,1536] copy(lhs),
sharding={devices=[4,1,2,1]<=[8]}
%rhs = bf16[2,1536,512,1] parameter(1)
%rhs.copy = bf16[2,1536,512,1] copy(rhs),
sharding={devices=[2,1,2,1,2]<=[4,2]T(1,0) last_tile_dim_replicate}
ROOT %convolution = bf16[8,1024,512,1] convolution(lhs.copy, rhs.copy),
window={size=1x2}, dim_labels=0b1f_1io0->0bf1,
sharding={devices=[4,1,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[2,1024,1,1536]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(1), op::Reshape(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[1,1536,256,1]"));
const auto partial_replicate_rhs =
AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), rhs, op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("bf16[1,1536,512,1]"));
EXPECT_THAT(
root,
AllOf(op::DynamicSlice(
op::AllReduce(op::Convolution(lhs, partial_replicate_rhs)),
op::Constant(), op::Constant(), op::Reshape(), op::Constant()),
op::Shape("bf16[2,1024,256,1]")));
}
TEST_P(SpmdPartitioningTest, ReplicatedRng) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = s32[] parameter(0)
%lhs.copy = s32[] copy(%lhs), sharding={replicated}
%rhs = s32[] parameter(1)
%rhs.copy = s32[] copy(%rhs), sharding={replicated}
ROOT %rng = s32[4]{0} rng(%lhs.copy, %rhs.copy),
distribution=rng_uniform, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::Parameter(0)), op::Shape("s32[]"));
const auto rhs = AllOf(op::Copy(op::Parameter(1)), op::Shape("s32[]"));
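  // The RNG runs on a single partition (chosen by comparing partition-id
  // against a constant), and its result is replicated to all partitions via
  // select + all-reduce.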
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::Select(
op::Broadcast(op::Compare(op::PartitionId(), op::Constant())),
op::Rng(), op::Broadcast(op::Constant()))),
op::Shape("s32[4]")));
}
TEST_P(SpmdPartitioningTest, ManualRng) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = s32[] parameter(0), sharding={manual}
%rhs = s32[] parameter(1), sharding={manual}
ROOT %rng = s32[4]{0} rng(%lhs, %rhs),
distribution=rng_uniform, sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Rng(op::Parameter(0), op::Parameter(1)),
op::Shape("s32[4]")));
}
TEST_P(SpmdPartitioningTest, PartitionedRng) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = s32[] parameter(0)
%lhs.copy = s32[] copy(%lhs), sharding={replicated}
%rhs = s32[] parameter(1)
%rhs.copy = s32[] copy(%rhs), sharding={maximal device=1}
ROOT %rng = s32[4]{0} rng(%lhs.copy, %rhs.copy),
distribution=rng_uniform, sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Copy(op::Parameter(0)), op::Shape("s32[]"));
const auto rhs =
AllOf(op::Copy(op::Copy(op::Parameter(1))), op::Shape("s32[]"));
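  // The seed held by the maximal device is first replicated to all partitions
  // (select + all-reduce); each partition then runs the RNG locally on its
  // s32[2] shard of the output.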
EXPECT_THAT(root, AllOf(op::Rng(lhs, op::AllReduce(op::Select(
op::Broadcast(op::Compare()), rhs,
op::Broadcast(op::Constant())))),
op::Shape("s32[2]")));
}
TEST_P(SpmdPartitioningTest, PartialReplicatedRng) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = s32[] parameter(0), sharding={replicated}
%rhs = s32[] parameter(1), sharding={replicated}
ROOT %rng = s32[8]{0} rng(%lhs, %rhs),
distribution=rng_uniform,
sharding={devices=[2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Parameter(0), op::Shape("s32[]"));
const auto rhs = AllOf(op::Parameter(1), op::Shape("s32[]"));
auto partition_id =
AllOf(op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId())),
op::Shape("u32[]"));
EXPECT_THAT(
root, AllOf(op::AllReduce(op::Select(
op::Broadcast(op::Compare(partition_id, op::Constant())),
op::Rng(lhs, rhs), op::Broadcast(op::Constant()))),
op::Shape("s32[4]")));
}
TEST_P(SpmdPartitioningTest, ManualPartitionId) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
ROOT %lhs = u32[] partition-id(), sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::PartitionId());
}
TEST_P(SpmdPartitioningTest, DynamicSliceAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[128,64] parameter(0), sharding={devices=[2,1]0,1}
%index = s32[] parameter(1)
%trivial_index = s32[] parameter(2)
ROOT %dynamic-slice = s32[128,2] dynamic-slice(%input, %trivial_index, %index),
dynamic_slice_sizes={128,2}, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::Parameter(0), op::Shape("s32[64,64]"));
EXPECT_THAT(root,
AllOf(op::DynamicSlice(input, op::Constant(), op::Parameter(1)),
op::Shape("s32[64,2]")));
}
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongNonPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[128,64] parameter(0), sharding={devices=[2,1]0,1}
%index = s32[] parameter(1)
%update = s32[128,2] parameter(2)
%trivial_index = s32[] parameter(3)
%update.copy = s32[128,2] copy(%update), sharding={devices=[2,1]0,1}
ROOT %dynamic-update-slice = s32[128,64]
dynamic-update-slice(%input, %update.copy, %trivial_index, %index),
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::Parameter(0), op::Shape("s32[64,64]"));
auto update = AllOf(op::Copy(op::DynamicSlice(op::Parameter(2), op::Reshape(),
op::Constant())),
op::Shape("s32[64,2]"));
EXPECT_THAT(root, AllOf(op::DynamicUpdateSlice(input, update, op::Constant(),
op::Parameter(1)),
op::Shape("s32[64,64]")));
}
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongPartitionedDimension) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[128,64] parameter(0), sharding={devices=[1,2]0,1}
%index = s32[] parameter(1)
%constant = s32[] constant(60)
%update = s32[128,2] parameter(2), sharding={devices=[1,2]0,1}
ROOT %dynamic-update-slice = s32[128,64]
dynamic-update-slice(%input, %update, %index, %constant),
sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::Parameter(0), op::Shape("s32[128,32]"));
auto update = AllOf(
op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(), op::Parameter(2),
op::Constant(), op::Reshape())),
op::Shape("s32[128,2]"));
EXPECT_THAT(root,
AllOf(op::Select(op::Broadcast(),
op::DynamicUpdateSlice(
input, update, op::Constant(), op::Select()),
input),
op::Shape("s32[128,32]")));
}
TEST_P(SpmdPartitioningTest, DynamicUpdateSliceAlongPartitionedDimension2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[8,790,2] parameter(0),
sharding={devices=[8,1,1]<=[8]}
%index = s32[] parameter(1)
%constant = s32[] constant(0)
%update = s32[1,790,2] parameter(2),
sharding={devices=[8,1,1]<=[8]}
ROOT %dynamic-update-slice = s32[8,790,2]
dynamic-update-slice(%input, %update, %index, %constant, %constant),
sharding={devices=[8,1,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::Parameter(0), op::Shape("s32[1,790,2]"));
auto update = AllOf(op::AllReduce(op::Select(
op::Broadcast(), op::Parameter(2), op::Broadcast())),
op::Shape("s32[1,790,2]"));
EXPECT_THAT(
root,
AllOf(op::Select(op::Broadcast(),
op::DynamicUpdateSlice(input, update, op::Select(),
op::Constant(), op::Constant()),
input),
op::Shape("s32[1,790,2]")));
}
TEST_P(SpmdPartitioningTest, DynamicUpdateSlicePartitionSliceAndNonSliceDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = s32[128,64] parameter(0)
%input.copy = s32[128,64] copy(%input), sharding={devices=[2,2]<=[4]}
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(60)
%update = s32[128,2] parameter(1)
%update.copy = s32[128,2] copy(%update), sharding={devices=[2,2]<=[4]}
ROOT %dynamic-update-slice = s32[128,64]
dynamic-update-slice(%input.copy, %update.copy, %constant.0, %constant.1),
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Reshape())),
op::Shape("s32[64,32]"));
auto update = AllOf(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(),
op::Copy(op::DynamicSlice(
op::Parameter(1), op::Reshape(), op::Reshape())),
op::Constant(), op::Reshape())),
op::Shape("s32[64,2]"));
EXPECT_THAT(root,
AllOf(op::Select(op::Broadcast(),
op::DynamicUpdateSlice(
input, update, op::Constant(), op::Select()),
input),
op::Shape("s32[64,32]")));
}
TEST_P(SpmdPartitioningTest, UnpartitionedGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={replicated}
%indices = s32[3] parameter(1), sharding={replicated}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
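  // The gather itself stays replicated; the sharded output is produced by
  // padding the full result and dynamic-slicing out each partition's half.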
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(
op::DynamicSlice(
op::Pad(op::Gather(op::Parameter(0), op::Parameter(1)), _), _, _),
op::Shape("f32[3,5]")));
}
TEST_P(SpmdPartitioningTest, PassthroughGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
%indices = s32[3] parameter(1), sharding={replicated}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[3,5]")));
}
TEST_P(SpmdPartitioningTest, PassthroughGather_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[3] parameter(1), sharding={replicated}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}, sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[3,5]")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
slice_sizes={1,1,8}, sharding={devices=[1,2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[8,2,2]")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughGather_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1),
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
slice_sizes={1,1,8},
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[8,2,2]")));
}
TEST_P(SpmdPartitioningTest, IndexAndOperandPassthroughGather) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[7,12] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[16,2] parameter(1),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
ROOT %gather = f32[16,1,12] gather(%input, %indices),
offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={1,12},
sharding={devices=[2,1,2]0,2,1,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[8,1,6]")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughGatherPartitionedIndexVectorDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,2,2]<=[8]}
ROOT %gather = f32[8,4,4] gather(%input, %indices), offset_dims={0},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1,
slice_sizes={1,1,8},
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("f32[2,9,8]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[2,2,2]"), op::AllReduce());
auto gather = AllOf(op::Shape("f32[8,2,2]"), op::Gather(operand, indices));
VLOG(1) << module->ToString();
EXPECT_THAT(root, op::CollectivePermute(gather));
}
TEST_P(SpmdPartitioningTest, GatherExplicitBatchDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[2,1,2,1]<=[2,2]T(1,0)}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,1,1]<=[4]}
ROOT %gather = f32[14,10,6,2] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,2}, sharding={devices=[2,2,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
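  // The explicit batch dimensions of the operand and indices are sharded
  // consistently, so the gather stays local and needs no collectives.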
auto input = AllOf(op::Shape("f32[5,3,7,4]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[7,5,6,2]"), op::Parameter(1));
auto gather = AllOf(op::Shape("f32[7,5,6,2]"), op::Gather(input, indices));
EXPECT_THAT(module->entry_computation()->root_instruction(), gather);
}
TEST_P(SpmdPartitioningTest, GatherExplicitBatchAndOperandPassthroughDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[2,1,1,2]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}, sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[5,3,14,2]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[14,5,6,2]"), op::Parameter(1));
auto gather = AllOf(op::Shape("f32[14,5,6,2]"), op::Gather(input, indices));
EXPECT_THAT(module->entry_computation()->root_instruction(), gather);
}
TEST_P(SpmdPartitioningTest, GatherExplicitBatchAndIndexPassthroughDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
ROOT %gather = f32[14,10,6,2] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,2}, sharding={devices=[2,1,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[10,3,7,4]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[7,10,3,2]"), op::Parameter(1));
auto gather = AllOf(op::Shape("f32[7,10,3,2]"), op::Gather(input, indices));
EXPECT_THAT(module->entry_computation()->root_instruction(), gather);
}
TEST_P(SpmdPartitioningTest, GatherPartitionedOnTrivialSliceDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(1), sharding={replicated}
ROOT %gather = f32[2,3,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
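  // The operand is partitioned along the trivially sliced dimension, so each
  // partition clamps the indices into its local range, gathers locally, masks
  // out-of-range rows with zeros, and the partial results are combined with an
  // all-reduce.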
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto min = AllOf(op::Broadcast(offset), op::Shape("s32[2,3]"));
auto max = AllOf(op::Broadcast(op::Add(offset, op::Constant())),
op::Shape("s32[2,3]"));
auto clamp = op::Clamp(min, op::Parameter(1), max);
auto gather = op::Gather(op::Parameter(0), op::Subtract(clamp, min));
auto mask =
op::Or(op::Lt(op::Parameter(1), min), op::Gt(op::Parameter(1), max));
auto masked =
op::Select(op::Broadcast(mask), op::Broadcast(op::Constant()), gather);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::AllReduce(masked), op::Shape("f32[2,3,9]")));
}
TEST_P(SpmdPartitioningTest,
GatherPartitionedOnTrivialSliceDims_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(1), sharding={replicated}
ROOT %gather = f32[2,3,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto min = AllOf(op::Broadcast(offset), op::Shape("s32[2,3]"));
auto max = AllOf(op::Broadcast(op::Add(offset, op::Constant())),
op::Shape("s32[2,3]"));
auto clamp = op::Clamp(min, op::Parameter(1), max);
auto gather = op::Gather(op::Parameter(0), op::Subtract(clamp, min));
auto mask =
op::Or(op::Lt(op::Parameter(1), min), op::Gt(op::Parameter(1), max));
auto masked =
op::Select(op::Broadcast(mask), op::Broadcast(op::Constant()), gather);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::AllReduce(masked), op::Shape("f32[2,3,9]")));
}
TEST_P(SpmdPartitioningTest, UnpartitionedScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={replicated}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::DynamicSlice(
op::Pad(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2)),
_),
_, _),
op::Shape("f32[2,5]")));
}
TEST_P(SpmdPartitioningTest, VariadicScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
%input.1 = f32[2,9] parameter(1), sharding={devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}
%indices = s32[3] parameter(2), sharding={replicated}
%updates.0 = f32[3,9] parameter(3), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
%updates.1 = f32[3,9] parameter(4), sharding={devices=[1,4]0,1,2,3}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
auto scatter = op::Scatter(op::Shape("f32[1,9]"), op::Shape("f32[1,9]"),
op::Shape("s32[3]"), op::Shape("f32[3,9]"),
op::Shape("f32[3,9]"));
EXPECT_THAT(
root,
AllOf(op::Tuple(op::DynamicSlice(
op::Pad(op::AllReduce(op::DynamicUpdateSlice(
_, op::GetTupleElement(scatter), _, _)),
_),
_, _),
op::DynamicSlice(
op::Pad(op::AllReduce(op::DynamicUpdateSlice(
_, op::GetTupleElement(scatter), _, _)),
_),
_, _)),
op::Shape("(f32[2,3],f32[2,3])")));
}
TEST_P(SpmdPartitioningTest, VariadicScatterSharedOperands) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[8,16,32] parameter(0), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%indices = s32[16,1] parameter(1), sharding={replicated}
%updates.0 = f32[8,16,16] parameter(2), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%updates.1 = f32[8,16,16] parameter(3), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = (f32[8,16,32], f32[8,16,32]) scatter(%input.0, %input.0, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={0,1},
inserted_window_dims={2},
scatter_dims_to_operand_dims={2},
index_vector_dim=1,
indices_are_sorted=true,
unique_indices=true,
sharding={{devices=[4,1,1,2]<=[8] last_tile_dim_replicate}, {devices=[4,1,1,2]<=[8] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(), op::Shape("(f32[2,16,32],f32[2,16,32])")));
}
TEST_P(SpmdPartitioningTest, PassthroughScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2), sharding={devices=[1,2]0,1}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1, sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2)),
op::Shape("f32[2,5]")));
}
TEST_P(SpmdPartitioningTest, PassthroughScatterVariadic) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[2,9] parameter(0), sharding={devices=[1,2]0,1}
%input1 = f32[2,9] parameter(1), sharding={devices=[1,2]0,1}
%indices = s32[3] parameter(2), sharding={replicated}
%updates0 = f32[3,9] parameter(3), sharding={devices=[1,2]0,1}
%updates1 = f32[3,9] parameter(4), sharding={devices=[1,2]0,1}
ROOT %scatter = (f32[2,9], f32[2,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={1}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=1,
sharding={{devices=[1,2]0,1},{devices=[1,2]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3),
op::Parameter(4)),
op::Shape("(f32[2,5], f32[2,5])")));
}
TEST_P(SpmdPartitioningTest, PassthroughScatter_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[3] parameter(1), sharding={replicated}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2)),
op::Shape("f32[2,5]")));
}
TEST_P(SpmdPartitioningTest, PassthroughScatterVariadic_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[2,9] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%input1 = f32[2,9] parameter(1),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%indices = s32[3] parameter(2), sharding={replicated}
%updates0 = f32[3,9] parameter(3),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%updates1 = f32[3,9] parameter(4),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %scatter = (f32[2,9], f32[2,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={1}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=1,
sharding={{devices=[1,2,2]<=[4] last_tile_dim_replicate},
{devices=[1,2,2]<=[4] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Scatter(op::Parameter(0), op::Parameter(1),
op::Parameter(2), op::Parameter(3),
op::Parameter(4)),
op::Shape("(f32[2,5], f32[2,5])")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
%updates = f32[4,4,8] parameter(2), sharding={devices=[2,2,1]<=[4]}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
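  // Indices and updates are partitioned along index-passthrough dimensions.
  // Only one partition keeps the original operand (the others see the
  // reduction identity), so all-reducing the per-partition scatter results
  // reconstructs the full output.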
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::PartitionId())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1),
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
%updates = f32[4,4,8] parameter(2),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::Reshape())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughScatterPartitionedIndexVectorDim) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,2,2]<=[8]}
%updates = f32[4,4,8] parameter(2),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("f32[2,9,8]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,2,2]"), op::AllReduce());
auto update = AllOf(op::Shape("f32[2,2,8]"), op::CollectivePermute());
auto scatter =
AllOf(op::Shape("f32[2,9,8]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::AllReduce(scatter))));
}
TEST_P(SpmdPartitioningTest, IndexPassthroughScatter_Min) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9,8] parameter(0), sharding={replicated}
%indices = s32[4,2,4] parameter(1), sharding={devices=[2,1,2]<=[4]}
%updates = f32[4,4,8] parameter(2), sharding={devices=[2,2,1]<=[4]}
ROOT %scatter = f32[2,9,8] scatter(%input, %indices, %updates),
to_apply=min,
update_window_dims={2},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::AllReduce(op::AllReduce(op::Scatter(
op::Select(op::Broadcast(op::Convert(op::PartitionId())),
op::Broadcast(op::Constant()), op::Parameter(0)),
op::Parameter(1), op::Parameter(2)))),
op::Shape("f32[2,9,8]")));
}
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchDims) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[2,1,2,1]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,1,1]<=[2,2]T(1,0)}
%updates = f32[14,10,6,2] parameter(2), sharding={devices=[2,2,1,1]<=[2,2]T(1,0)}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[2,1,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[5,6,7,4]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[7,5,6,2]"), op::Parameter(1));
auto updates = AllOf(op::Shape("f32[7,5,6,2]"), op::Parameter(2));
auto scatter =
AllOf(op::Shape("f32[5,6,7,4]"), op::Scatter(input, indices, updates));
EXPECT_THAT(module->entry_computation()->root_instruction(), scatter);
}
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchAndOperandPassthroughDims) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[1,1,2,2]<=[4]}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
%updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,1,1,2]<=[4]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[1,1,2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[10,6,7,2]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[7,10,6,2]"), op::Parameter(1));
auto updates = AllOf(op::Shape("f32[7,10,6,2]"), op::Parameter(2));
auto scatter =
AllOf(op::Shape("f32[10,6,7,2]"), op::Scatter(input, indices, updates));
EXPECT_THAT(module->entry_computation()->root_instruction(), scatter);
}
TEST_P(SpmdPartitioningTest, ScatterExplicitBatchAndIndexPassthroughDims) {
absl::string_view hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
%updates = f32[14,10,6,2] parameter(2), sharding={devices=[2,1,2,1]<=[4]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input =
AllOf(op::Shape("f32[10,6,7,4]"), op::Select(_, _, op::Parameter(0)));
auto indices = AllOf(op::Shape("s32[7,10,3,2]"), op::Parameter(1));
auto updates = AllOf(op::Shape("f32[7,10,3,2]"), op::Parameter(2));
auto scatter = AllOf(op::Shape("f32[10,6,7,4]"),
op::AllReduce(op::Scatter(input, indices, updates)));
EXPECT_THAT(module->entry_computation()->root_instruction(), scatter);
}
TEST_P(SpmdPartitioningTest, ScatterPartitionedOnTrivialSliceDims) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(1), sharding={replicated}
%updates = f32[2,3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[17,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
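  // As in the gather case, each partition owns a slice of the operand along
  // dimension 0, so the scatter indices are shifted by the partition offset
  // and the scatter is applied directly to the local shard.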
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(1), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), indices, op::Parameter(2)),
op::Shape("f32[9,9]")));
}
TEST_P(SpmdPartitioningTest, ScatterPartitionedOnTrivialSliceDimsVariadic) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[17,9] parameter(0), sharding={devices=[2,1]0,1}
%input1 = f32[17,9] parameter(1), sharding={devices=[2,1]0,1}
%indices = s32[2,3] parameter(2), sharding={replicated}
%updates0 = f32[2,3,9] parameter(3), sharding={replicated}
%updates1 = f32[2,3,9] parameter(4), sharding={replicated}
ROOT %scatter = (f32[17,9], f32[17,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={2}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2,
sharding={{devices=[2,1]0,1},{devices=[2,1]0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(2), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), op::Parameter(1), indices,
op::Parameter(3), op::Parameter(4)),
op::Shape("(f32[9,9], f32[9,9])")));
}
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDims_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(1), sharding={replicated}
%updates = f32[2,3,9] parameter(2), sharding={replicated}
ROOT %scatter = f32[17,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(1), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), indices, op::Parameter(2)),
op::Shape("f32[9,9]")));
}
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDimsVariadic_PartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
add_min_max {
lhs0 = f32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = f32[] parameter(2)
rhs1 = f32[] parameter(3)
min = minimum(rhs0, rhs1)
max = maximum(rhs0, rhs1)
min_sum = add(lhs0, min)
max_sum = add(lhs1, max)
ROOT tuple = tuple(min_sum, max_sum)
}
ENTRY entry {
%input0 = f32[17,9] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%input1 = f32[17,9] parameter(1),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%indices = s32[2,3] parameter(2), sharding={replicated}
%updates0 = f32[2,3,9] parameter(3), sharding={replicated}
%updates1 = f32[2,3,9] parameter(4), sharding={replicated}
ROOT %scatter = (f32[17,9], f32[17,9])
scatter(%input0, %input1, %indices, %updates0, %updates1),
to_apply=add_min_max, update_window_dims={2}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2,
sharding={{devices=[2,1,2]<=[4] last_tile_dim_replicate},
{devices=[2,1,2]<=[4] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto offset =
op::Reshape(op::DynamicSlice(op::Constant(), op::PartitionId()));
auto indices = op::Subtract(
op::Parameter(2), AllOf(op::Broadcast(offset), op::Shape("s32[2,3]")));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Scatter(op::Parameter(0), op::Parameter(1), indices,
op::Parameter(3), op::Parameter(4)),
op::Shape("(f32[9,9], f32[9,9])")));
}
TEST_P(SpmdPartitioningTest, TiledReversePassthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[3,3]{1,0} constant({{1,1,1},{1,1,1},{1,1,1}}),
sharding={devices=[2,1]0,1}
ROOT reverse = f32[3,3]{1,0} reverse(constant), dimensions={1},
sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,3]{1,0}"),
op::Reverse(op::DynamicSlice(
op::Pad(op::Constant(), op::Constant()),
op::Reshape(), op::Constant()))));
}
TEST_P(SpmdPartitioningTest, TiledReversePassthroughViaReversedSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[4] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[4] reverse(param), dimensions={0},
sharding={devices=[2]1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2]"), op::Reverse(op::Parameter(0))));
}
TEST_P(SpmdPartitioningTest, TiledReverseSwapShards) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[4] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[4] reverse(param), dimensions={0},
sharding={devices=[2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("f32[2]"),
op::Reverse(op::CollectivePermute(op::Parameter(0)))));
}
TEST_P(SpmdPartitioningTest, TiledReverseHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = f32[3] parameter(0), sharding={devices=[2]0,1}
ROOT reverse = f32[3] reverse(param), dimensions={0},
sharding={devices=[2]1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
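  // The 3-element dimension does not divide evenly across 2 partitions, so the
  // reverse needs a halo exchange: a collective-permuted slice from the peer
  // is concatenated with the local slice before reversing.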
auto halo_exchange_concat =
op::Concatenate(AllOf(op::Shape("f32[1]"),
op::CollectivePermute(op::Slice(op::Parameter(0)))),
op::Slice(op::Parameter(0)));
EXPECT_THAT(root,
AllOf(op::Shape("f32[2]"), op::Reverse(halo_exchange_concat)));
}
TEST_P(SpmdPartitioningTest, MixWithManualPartitioning) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = (f32[8,2], f32[4,2]) parameter(0), sharding={{devices=[2,1]0,1},{manual}}
param0 = f32[8,2] get-tuple-element(param), index=0, sharding={devices=[2,1]0,1}
param1 = f32[4,2] get-tuple-element(param), index=1, sharding={manual}
to_shard = f32[4,2] custom-call(param0), custom_call_target="SPMDFullToShardShape", sharding={manual}
add = f32[4,2] add(to_shard, param1), sharding={manual}
to_full = f32[8,2] custom-call(add), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1}
mul = f32[8,2] multiply(to_full, param0), sharding={devices=[2,1]0,1}
to_shard2 = f32[4,2] custom-call(mul), custom_call_target="SPMDFullToShardShape", sharding={manual}
ROOT tuple = (f32[4,2]) tuple(to_shard2), sharding={{manual}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
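  // The SPMDFullToShardShape/SPMDShardToFullShape custom calls are lowered to
  // plain copies; the manually partitioned add and the multiply then operate
  // on per-partition f32[4,2] shapes.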
auto p0 = op::GetTupleElement(op::Parameter(0));
auto to_shard = op::Copy(p0);
auto p1 = op::GetTupleElement(op::Parameter(0));
auto mul = AllOf(op::Shape("f32[4,2]"),
op::Multiply(op::Copy(op::Add(to_shard, p1)), p0));
EXPECT_THAT(root, op::Tuple(op::Copy(mul)));
}
TEST_P(SpmdPartitioningTest, NestedManual) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
p.0 = s32[16,16,16] parameter(0), sharding={devices=[2,2,2]<=[8]}
m.0 = s32[8,8,8] custom-call(p.0), custom_call_target="SPMDFullToShardShape", sharding={manual}
m.1 = s32[16,8,8] custom-call(m.0), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1,1,4]<=[8] last_tile_dims={manual}}
m.2 = s32[16,16,8] custom-call(m.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,2,1,2]<=[8] last_tile_dims={manual}}
ROOT out.0 = s32[16,16,16] custom-call(m.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,2,2]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("s32[8,8,8]"),
op::Copy(op::Copy(op::Copy(op::Copy(op::Parameter(0)))))));
}
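// Subgroup all-to-all reshard: moving a tensor between shardings that assign
// the same device mesh to different tensor dimensions is expected to lower to
// a reshape -> all-to-all -> transpose -> reshape sequence (plus a
// collective-permute when the device order also changes).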
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8,8,8] parameter(0),
sharding={devices=[2,2,1,2]<=[8]}
ROOT %copy = f32[8,8,8,8] copy(%param0),
sharding={devices=[1,2,2,2]0,1,4,5,2,3,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto reshape =
AllOf(op::Shape("f32[4,4,2,4,4]"), op::Reshape(op::Parameter(0)));
auto all_to_all = AllOf(op::Shape("f32[4,4,2,4,4]"), op::AllToAll(reshape));
auto xpose = AllOf(op::Shape("f32[2,4,4,4,4]"), op::Transpose(all_to_all));
EXPECT_THAT(root,
op::Copy(AllOf(op::Reshape(xpose), op::Shape("f32[8,4,4,4]"))));
EXPECT_EQ(root->operand(0)->operand(0)->operand(0)->replica_groups().size(),
4);
}
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,4]<=[8]}
ROOT %copy = f32[8,8] copy(%param0),
sharding={devices=[4,2]0,1,4,5,2,3,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto all_to_all = op::AllToAll(
AllOf(op::Shape("f32[2,2,2]"), op::Reshape(op::Parameter(0))));
auto reshape =
AllOf(op::Shape("f32[2,4]"), op::Reshape(op::Transpose(all_to_all)));
EXPECT_THAT(root, op::Copy(op::CollectivePermute(reshape)));
}
TEST_P(SpmdPartitioningTest, SubgroupAllToAllReshard3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8,8] parameter(0),
sharding={devices=[2,4,1]<=[8]}
ROOT %copy = f32[8,8,8] copy(%param0),
sharding={devices=[1,2,4]0,1,4,5,2,3,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto all_to_all = op::AllToAll(
AllOf(op::Shape("f32[4,2,4,2]"), op::Reshape(op::Parameter(0))));
auto reshape =
AllOf(op::Shape("f32[4,8,2]"), op::Reshape(op::Transpose(all_to_all)));
auto all_to_all2 =
op::AllToAll(AllOf(op::Shape("f32[4,2,4,2]"), op::Reshape(reshape)));
auto reshape2 =
AllOf(op::Shape("f32[8,4,2]"), op::Reshape(op::Transpose(all_to_all2)));
EXPECT_THAT(root, op::Copy(op::CollectivePermute(reshape2)));
}
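// The dot tests below exercise operands sharded along combinations of batch,
// non-contracting, and contracting dimensions. Depending on the shardings, the
// partitioner is expected to reshard an operand (partial replication via
// all-reduce of a dynamic-update-slice, all-to-all, collective-permute, or
// dynamic-slice) so that each partition computes a local dot, and then to
// reduce or slice the result to match the requested output sharding.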
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting0) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[48,12] parameter(0), sharding={devices=[2,2]<=[4]}
%rhs = f32[32,12] parameter(1), sharding={devices=[2,2]0,2,1,3}
ROOT %dot = f32[48,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,6]"), op::Parameter(0));
auto partial_replicated_lhs =
AllOf(op::Shape("f32[24,12]"),
op::AllReduce(op::DynamicUpdateSlice(_, lhs, _, _)));
const auto rhs = AllOf(op::Shape("f32[16,6]"), op::Parameter(1));
auto partial_replicated_rhs =
AllOf(op::Shape("f32[16,12]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Dot(partial_replicated_lhs, partial_replicated_rhs),
op::Shape("f32[24,16]")));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[48,100] parameter(0), sharding={devices=[2,2]<=[4]}
%rhs = f32[32,100] parameter(1), sharding={devices=[2,2]<=[4]}
ROOT %dot = f32[48,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
auto partial_replicated_rhs =
AllOf(op::Shape("f32[32,50]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Shape("f32[24,16]"),
op::DynamicSlice(
op::AllReduce(AllOf(op::Dot(lhs, partial_replicated_rhs),
op::Shape("f32[24,32]"))),
_, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[48,100] parameter(0), sharding={replicated}
%rhs = f32[32,100] parameter(1), sharding={devices=[2,2]<=[4]}
ROOT %dot = f32[48,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[48,100]"), op::Parameter(0));
const auto lhs_slice =
AllOf(op::Shape("f32[24,100]"), op::DynamicSlice(lhs, _, _));
const auto rhs = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
auto partial_replicated_rhs = AllOf(
op::Shape("f32[16,100]"), op::AllReduce(op::DynamicUpdateSlice(
_, op::CollectivePermute(rhs), _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[24,16]"),
op::Dot(lhs_slice, partial_replicated_rhs)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedNonContractingAndContracting3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[23,24] parameter(0), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%rhs = f32[23,32] parameter(1), sharding={devices=[2,2]<=[4]}
ROOT %dot = f32[24,32] dot(%lhs, %rhs),
lhs_contracting_dims={0}, rhs_contracting_dims={0},
sharding={devices=[2,2]1,0,3,2}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[12,24]"), op::Parameter(0));
auto masked_lhs = op::Select(_, lhs, op::Broadcast(op::Constant()));
const auto rhs = AllOf(op::Shape("f32[12,16]"), op::Parameter(1));
auto masked_rhs = op::Select(_, rhs, op::Broadcast(op::Constant()));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("f32[12,16]"),
op::DynamicSlice(
AllOf(op::Shape("f32[24,16]"),
op::AllReduce(op::Dot(masked_lhs, masked_rhs))),
_, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndNonContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,24,100] parameter(0), sharding={devices=[2,2,1]<=[4]}
%rhs = f32[4,32,100] parameter(1), sharding={devices=[2,2,1]<=[4]}
ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,12,100]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[2,16,100]"), op::Parameter(1));
auto partial_replicated_rhs =
AllOf(op::Shape("f32[2,32,100]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,12,32]"),
op::Dot(lhs, partial_replicated_rhs)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
%rhs = f32[4,32,100] parameter(1), sharding={devices=[1,2,2]<=[4]}
ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[4,16,50]"), op::Parameter(1));
auto resharded_rhs =
AllOf(op::Shape("f32[2,32,50]"),
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs)))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,12,32]"),
op::DynamicSlice(
AllOf(op::Shape("f32[2,24,32]"),
op::AllReduce(op::Dot(lhs, resharded_rhs))),
_, _, _)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndContracting2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
%rhs = f32[4,32,100] parameter(1), sharding={replicated}
ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
auto resharded_lhs =
AllOf(op::Shape("f32[2,12,100]"),
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs)))));
const auto rhs = AllOf(op::Shape("f32[4,32,100]"), op::Parameter(1));
const auto rhs_slice =
AllOf(op::Shape("f32[2,32,100]"), op::DynamicSlice(rhs, _, _, _));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,12,32]"),
op::Dot(resharded_lhs, rhs_slice)));
}
TEST_P(SpmdPartitioningTest,
Dot2DPartitionedBatchNonContractingAndContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,24,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
%rhs = f32[4,32,100] parameter(1), sharding={devices=[2,2,1]<=[4]}
ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,24,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[2,16,100]"), op::Parameter(1));
auto partial_replicated_lhs =
AllOf(op::Shape("f32[2,24,100]"),
op::AllReduce(op::DynamicUpdateSlice(_, lhs, _, _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,24,16]"),
op::Dot(partial_replicated_lhs, rhs)));
}
TEST_P(SpmdPartitioningTest, Dot2DPartitionedBatchAndReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,8,24,100] parameter(0), sharding={devices=[2,1,2,1]<=[4]}
%rhs = f32[4,8,32,100] parameter(1), sharding={devices=[2,1,2,1]<=[4]}
ROOT %dot = f32[4,8,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={3},
sharding={devices=[1,2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,8,12,100]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[2,8,16,100]"), op::Parameter(1));
auto partial_replicated_rhs =
AllOf(op::Shape("f32[2,8,32,100]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _, _, _)));
auto dot =
AllOf(op::Shape("f32[2,8,12,32]"), op::Dot(lhs, partial_replicated_rhs));
auto reshape = AllOf(op::Shape("f32[2,2,4,12,32]"), op::Reshape(dot));
auto all_to_all = AllOf(op::Shape("f32[2,2,4,12,32]"), op::AllToAll(reshape));
auto xpose = AllOf(op::Shape("f32[2,2,4,12,32]"), op::Transpose(all_to_all));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[4,4,12,32]"), op::Reshape(xpose)));
}
TEST_P(SpmdPartitioningTest, SimpleDotPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[2,24,100] parameter(0),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
%rhs = f32[2,32,100] parameter(1),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
ROOT %dot = f32[2,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[1,24,100]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[1,32,100]"), op::Parameter(1));
auto dot = AllOf(op::Shape("f32[1,24,32]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, dot);
}
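// Sparse dot carries a metadata operand, so the expected instruction is
// matched with a raw HloMatcher on HloOpcode::kDot instead of op::Dot, which
// only takes two operands.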
TEST_P(SpmdPartitioningTest, SimpleSparseDot) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[2,24,128] parameter(0),
sharding={devices=[2,2,1]<=[4]}
%rhs = f32[2,32,256] parameter(1),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
%meta = u16[2,24,16] parameter(2),
sharding={devices=[2,2,1]<=[4]}
ROOT %dot = f32[2,24,32] dot(%lhs, %rhs, %meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}, sparsity=L.2@2:4,
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[1,12,128]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[1,32,256]"), op::Parameter(1));
const auto meta = AllOf(op::Shape("u16[1,12,16]"), op::Parameter(2));
auto dot = AllOf(op::Shape("f32[1,12,32]"),
::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kDot, {lhs, rhs, meta})));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, dot);
}
TEST_P(SpmdPartitioningTest, DotPartialContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,100] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%rhs = f32[32,100] parameter(1),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %dot = f32[24,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[32,50]"), op::Parameter(1));
auto dot = AllOf(op::Shape("f32[24,32]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, DotPartialContracting2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,100] parameter(0),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
%rhs = f32[32,100] parameter(1),
sharding={devices=[1,2,2]<=[4] last_tile_dim_replicate}
ROOT %dot = f32[24,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[32,50]"), op::Parameter(1));
auto dot =
AllOf(op::Shape("f32[12,32]"),
op::Dot(AllOf(op::Shape("f32[12,50]"), op::DynamicSlice(lhs, _, _)),
rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, DotPartialContracting3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,100] parameter(0),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
%rhs = f32[32,100] parameter(1),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT %dot = f32[24,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={1},
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,50]"), op::Parameter(0));
const auto rhs =
AllOf(op::Shape("f32[16,50]"), op::DynamicSlice(op::Parameter(1), _, _));
auto dot = AllOf(op::Shape("f32[24,16]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::CollectivePermute(op::AllReduce(dot)));
}
TEST_P(SpmdPartitioningTest, DotBatchAndPartialContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,24,100] parameter(0),
sharding={devices=[2,2,2]<=[8]}
%rhs = f32[4,32,100] parameter(1),
sharding={devices=[2,1,2,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate}
ROOT %dot = f32[4,24,32] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[2,12,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[2,32,50]"), op::Parameter(1));
auto dot = AllOf(op::Shape("f32[2,12,32]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, DotPartialNonContracting) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,8,100] parameter(0),
sharding={devices=[2,1,1,2]<=[4] last_tile_dim_replicate}
%rhs = f32[32,100] parameter(1), sharding={devices=[2,2]0,2,1,3}
ROOT %dot = f32[24,8,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={1},
sharding={devices=[2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[12,8,100]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[16,50]"), op::Parameter(1));
auto partially_replicated_rhs =
AllOf(op::Shape("f32[16,100]"),
op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(_), rhs, _, _)));
auto dot =
AllOf(op::Shape("f32[12,8,16]"), op::Dot(lhs, partially_replicated_rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, dot);
}
TEST_P(SpmdPartitioningTest, DotPartialNonContractingPartialMatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,8,100] parameter(0), sharding={devices=[2,2,1]<=[4]}
%rhs = f32[32,100] parameter(1),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
ROOT %dot = f32[24,8,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={1},
sharding={devices=[2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[12,4,100]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[16,100]"), op::Parameter(1));
auto partially_replicated_lhs = AllOf(
op::Shape("f32[12,8,100]"),
op::AllReduce(op::DynamicUpdateSlice(op::Broadcast(_), lhs, _, _, _)));
auto dot =
AllOf(op::Shape("f32[12,8,16]"), op::Dot(partially_replicated_lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, dot);
}
TEST_P(SpmdPartitioningTest, DotPartialContractingPartialMatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,8,100] parameter(0), sharding={devices=[1,2,2]<=[4]}
%rhs = f32[32,8,100] parameter(1),
sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
ROOT %dot = f32[24,32] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1,2}, rhs_contracting_dims={1,2},
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[24,4,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[32,8,50]"), op::Parameter(1));
auto dot = AllOf(op::Shape("f32[24,32]"),
op::Dot(lhs, AllOf(op::Shape("f32[32,4,50]"),
op::DynamicSlice(rhs, _, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(op::AllReduce(dot)));
}
TEST_P(SpmdPartitioningTest, DotNonContractingPartialMatchContractingMatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,8,100] parameter(0), sharding={devices=[2,1,2]<=[4]}
%rhs = f32[100,50] parameter(1), sharding={devices=[2,2]0,2,1,3}
ROOT %dot = f32[24,8,50] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[12,8,50]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[50,25]"), op::Parameter(1));
auto dot = AllOf(
op::Shape("f32[12,8,50]"),
op::Dot(lhs, AllOf(op::Shape("f32[50,50]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[12,4,50]"),
op::DynamicSlice(op::AllReduce(dot), _, _, _)))
<< module->ToString();
}
TEST_P(SpmdPartitioningTest, DotLHSMultiNonContractingRHSNotMatch) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[24,8,10] parameter(0), sharding={devices=[2,2,1]<=[4]}
%rhs = f32[10,50] parameter(1),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
ROOT %dot = f32[24,8,50] dot(%lhs, %rhs),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2}, rhs_contracting_dims={0},
sharding={devices=[2,2,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("f32[12,4,10]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("f32[5,50]"), op::Parameter(1));
auto dot = AllOf(
op::Shape("f32[12,4,50]"),
op::Dot(lhs, AllOf(op::Shape("f32[10,50]"),
op::AllReduce(op::DynamicUpdateSlice(_, rhs, _, _)))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, dot) << module->ToString();
}
TEST_P(SpmdPartitioningTest, ReshardLHSRHSToMatchDotSharding) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %main.7 {
%p0 = bf16[32,97] parameter(0), sharding={devices=[32,1]<=[8,4]T(1,0)}
%p1 = bf16[48,64,97] parameter(1), sharding={devices=[8,4,1]<=[32]}
%dot.0 = bf16[32,48,64] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[4,8,1]<=[8,4]T(1,0)}
%dot.1 = bf16[32,48,64] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[4,4,1,2]<=[8,4]T(1,0) last_tile_dim_replicate}
ROOT %tuple = tuple(%dot.0, %dot.1), sharding={{devices=[4,8,1]<=[8,4]T(1,0)}, {devices=[4,4,1,2]<=[8,4]T(1,0) last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 32));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[8,97]"));
const auto rhs0 = AllOf(op::Shape("bf16[6,64,97]"));
const auto rhs1 = AllOf(op::Shape("bf16[12,64,97]"));
auto dot0 = AllOf(op::Shape("bf16[8,6,64]"), op::Dot(lhs, rhs0));
auto dot1 = AllOf(op::Shape("bf16[8,12,64]"), op::Dot(lhs, rhs1));
auto tuple =
AllOf(op::Shape("(bf16[8,6,64], bf16[8,12,64])"), op::Tuple(dot0, dot1));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tuple);
}
TEST_P(SpmdPartitioningTest, PartiallyReplicateRHS) {
const char* const hlo_string = R"(
HloModule module
ENTRY main {
lhs = bf16[16384,2048] parameter(0), sharding={devices=[16,8]<=[128]}
rhs = bf16[16384,256] parameter(1), sharding={devices=[128,1]<=[128]}
ROOT dot = bf16[2048,256] dot(lhs, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[8,1,16]<=[16,8]T(1,0) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[1024,256]"), op::Parameter(0));
const auto rhs = AllOf(op::Shape("bf16[1024,256]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), op::Parameter(1), _, _)));
auto dot = AllOf(op::Shape("bf16[256,256]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, AllToAllAndPartialReplicateRHS) {
const char* const hlo_string = R"(
HloModule module
ENTRY main {
lhs = bf16[64,64] parameter(0), sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
rhs = bf16[64,64,64] parameter(1), sharding={devices=[1,2,4]<=[2,2,2]T(2,1,0)}
ROOT dot = bf16[64,64,64] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={2}, sharding={devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[32,32]"), op::Parameter(0));
const auto all_to_all_p1 = AllOf(
op::Shape("bf16[32,64,16]"),
op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::Parameter(1))))));
const auto rhs = AllOf(op::Shape("bf16[32,64,32]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), all_to_all_p1, _, _, _)));
auto dot = AllOf(op::Shape("bf16[32,32,64]"), op::Dot(lhs, rhs));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(dot));
}
TEST_P(SpmdPartitioningTest, ReplicateLHSofConv) {
const char* const hlo_string = R"(
HloModule module
ENTRY main {
lhs = bf16[128,8,8,1280] parameter(0), sharding={devices=[128,1,1,1]<=[128]}
rhs = bf16[3,3,1280,1280] parameter(1), sharding={devices=[1,1,1,8,16]<=[16,8]T(1,0) last_tile_dim_replicate}
ROOT conv = bf16[128,8,8,1280] convolution(lhs, rhs), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, sharding={devices=[1,1,1,8,16]<=[16,8]T(1,0) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 128));
VLOG(1) << module->ToString();
const auto lhs = AllOf(op::Shape("bf16[128,8,8,1280]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), op::Parameter(0), _, _, _, _)));
const auto rhs = AllOf(op::Shape("bf16[3,3,1280,160]"), op::Parameter(1));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Shape("bf16[128,8,8,160]"), op::Convolution(lhs, rhs)));
}
TEST_P(SpmdPartitioningTest, ElementwiseTest_SubgroupSharding_TileToReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[6,3]{1,0}
constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
constant.1 = f32[6,3]{1,0}
constant({{2,7,2},{2,9,2},{2,6,2},{3,7,2},{2,9,3},{2,3,2}}),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
multiply = f32[6,3]{1,0} multiply(constant, constant.1),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
ROOT add = f32[6,3]{1,0} add(multiply, constant.1),
sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated, manual}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto multiply_lhs =
AllOf(op::Shape("f32[6,2]"),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Constant(), op::Reshape()));
auto multiply_rhs =
AllOf(op::Shape("f32[6,2]"),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Constant(), op::Reshape()));
auto multiply =
AllOf(op::Shape("f32[6,2]"), op::Multiply(multiply_lhs, multiply_rhs));
auto replicated_lhs = AllOf(op::Shape("f32[6,3]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(), op::Select(_, multiply, _),
op::Constant(), op::Reshape())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[6,3]"),
op::Add(replicated_lhs, op::Constant())));
}
TEST_P(SpmdPartitioningTest, ElementwiseTest_SubgroupSharding_ReplicateToTile) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[6,3]{1,0}
constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
constant.1 = f32[6,3]{1,0}
constant({{2,7,2},{2,9,2},{2,6,2},{3,7,2},{2,9,3},{2,3,2}}),
sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
multiply = f32[6,3]{1,0} multiply(constant, constant.1),
sharding={devices=[1,1,2,2]<=[4] last_tile_dims={replicated,manual}}
ROOT add = f32[6,3]{1,0} add(multiply, constant.1),
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto multiply = AllOf(op::Shape("f32[6,3]"),
op::Multiply(op::Constant(), op::Constant()));
auto add_lhs = AllOf(op::Shape("f32[6,2]"),
op::DynamicSlice(op::Pad(multiply, op::Constant()),
op::Constant(), op::Reshape()));
auto add_rhs = AllOf(op::Shape("f32[6,2]"),
op::DynamicSlice(op::Pad(op::Constant(), op::Constant()),
op::Constant(), op::Reshape()));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[6,2]"), op::Add(add_lhs, add_rhs)));
}
TEST_P(SpmdPartitioningTest,
ElementwiseTest_PartialReplicateToTiledHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
input = f32[6,3] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT copy = f32[6,3]{1,0} copy(input),
sharding={devices=[4,1]<=[4]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[3,3]"), op::Parameter(0));
auto right_halo =
AllOf(op::Shape("f32[1,3]"), op::CollectivePermute(op::Slice(input)));
auto concat = op::Concatenate(
input, AllOf(op::Shape("f32[2,3]"), op::Pad(right_halo, _)));
auto valid_slice =
AllOf(op::Shape("f32[4,3]"), op::DynamicSlice(concat, _, _));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,3]"),
op::Copy(op::DynamicSlice(valid_slice, _, _))));
}
TEST_P(SpmdPartitioningTest, TileToPartialReplicateReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[2,2]<=[4]}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto tiled = AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Reshape())));
auto partially_replicated = AllOf(
op::Shape("f32[4,8]"), op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_), tiled, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
}
TEST_P(SpmdPartitioningTest, TileToPartialReplicateReshardUnevenPartition) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0), sharding={devices=[2,3]<=[6]}
ROOT %copy0 = f32[8,8] copy(%param0),
sharding={devices=[1,2,3]<=[6] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 6));
VLOG(1) << module->ToString();
auto tiled = AllOf(op::Shape("f32[4,3]"), op::Select(_, op::Parameter(0), _));
auto partially_replicated = AllOf(
op::Shape("f32[8,4]"),
op::Copy(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(op::AllReduce(
op::DynamicUpdateSlice(op::Broadcast(), tiled, _, _))))))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
const HloInstruction* all_reduce =
FindInstruction(module.get(), "all-reduce");
EXPECT_NE(all_reduce, nullptr);
EXPECT_TRUE(
absl::StrContains(all_reduce->ToString(), "replica_groups=[2,3]<=[6]"));
}
TEST_P(SpmdPartitioningTest, PartialReplicateToTileReshardUnevenPartition) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0),
sharding={devices=[1,2,3]<=[6] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%param0), sharding={devices=[2,3]<=[6]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 6));
VLOG(1) << module->ToString();
auto partial_replicated = AllOf(op::Shape("f32[8,4]"), op::Parameter(0));
auto tiled = AllOf(
op::Shape("f32[4,3]"),
op::Copy(op::DynamicSlice(op::Pad(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(partial_replicated)))),
_),
_, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
TEST_P(SpmdPartitioningTest, PartialReplicateToTileReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant())));
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(partially_replicated, op::Subtract(),
op::Subtract())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshard_AllReduce) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(param0),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated_init =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Reshape())));
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_), partially_replicated_init, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshard_DynamicSlice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[4,8]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Reshape(),
op::Constant())));
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::DynamicSlice(partially_replicated, op::Subtract(),
op::Subtract())));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshardWithCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(param0),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated_init =
AllOf(op::Shape("f32[4,4]"),
op::CollectivePermute(op::Copy(op::DynamicSlice(
op::Parameter(0), op::Reshape(), op::Reshape()))));
auto partially_replicated =
AllOf(op::Shape("f32[8,4]"),
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_), partially_replicated_init, _, _))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, partially_replicated);
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshardCollectivePermute1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[8,8] parameter(0)
%copy = f32[8,8] copy(%param0),
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[8,8] copy(%copy),
sharding={devices=[2,2,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto partially_replicated =
AllOf(op::Shape("f32[8,4]"),
op::Copy(op::DynamicSlice(op::Parameter(0), op::Constant(),
op::Reshape())));
auto tiled =
AllOf(op::Shape("f32[4,4]"),
op::Copy(op::CollectivePermute(op::DynamicSlice(
partially_replicated, op::Subtract(), op::Subtract()))));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, tiled);
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshardHaloExchange) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[6,3] parameter(0),
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[6,3] copy(%param0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[2,3]"), op::Parameter(0));
auto piece1 =
AllOf(op::Shape("f32[2,3]"),
op::Select(_, op::Pad(op::CollectivePermute(op::Slice(input)), _),
input));
auto piece2 = AllOf(op::Shape("f32[1,3]"), op::Slice(input));
auto concat = op::Concatenate(piece1, piece2);
auto partially_replicated =
AllOf(op::Shape("f32[3,3]"),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(_),
op::Select(_, op::DynamicSlice(concat, _, _), _), _, _)));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Copy(partially_replicated));
}
TEST_P(SpmdPartitioningTest,
PartialReplicateToPartialReplicateReshardHaloExchange1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[6,3] parameter(0),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
ROOT %copy0 = f32[6,3] copy(%param0),
sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto input = AllOf(op::Shape("f32[3,3]"), op::Parameter(0));
auto slice =
AllOf(op::Shape("f32[4,3]"),
op::DynamicSlice(
op::Concatenate(
input, op::Pad(op::CollectivePermute(op::Slice(input)), _)),
_, _));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Shape("f32[2,3]"),
op::Copy(op::DynamicSlice(slice, _, _))));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,1,512]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupCountRHSAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,2,1,1]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_rhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs))))),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, resharded_rhs),
op::Shape("f32[5,1,1,512]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupCountLHSAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, rhs),
op::Shape("f32[5,1,1,512]")));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBatchGroupCountOutputAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[3,1,1,1024]")));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBatchGroupCountOutputAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
auto conv =
AllOf(op::Convolution(resharded_lhs, rhs), op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[3,1,1,1024]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupAlignWithLHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[2,1,4]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[4,1,2]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[5,1,64]")));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBatchGroupCountAlignWithRHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[4,1,2]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[2,1,4]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]"));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(op::AllToAll(
op::Reshape(op::Pad(conv, op::Constant()))))),
op::Shape("f32[5,1,64]")));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithBatchGroupCountAlignWithOutputPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,64]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,64]{2,1,0} copy(lhs), sharding={devices=[4,1,2]<=[8]}
%rhs = f32[4,275,64]{2,1,0} parameter(1)
%copy.25 = f32[4,275,64]{2,1,0} copy(rhs), sharding={devices=[4,1,2]<=[8]}
ROOT %convolution.6144 = f32[5,1,64]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=f0b_i0o->0bf, batch_group_count=64,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,16]"));
const auto rhs = AllOf(op::Shape("f32[4,275,16]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,1,16]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,2048] parameter(1)
%rhs.copy = f32[5,1,1,2048] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,2048] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,1024]"));
EXPECT_THAT(
root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,1024]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCount2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[64,3,1,3072] parameter(0)
%lhs.copy = f32[64,3,1,3072] copy(%lhs),
sharding={devices=[1,1,1,4,8]0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,24,25
,26,27,28,29,30,31,8,9,10,11,12,13,14,15 last_tile_dim_replicate}
%rhs = f32[3,1,1,3072] parameter(1)
%rhs.copy = f32[3,1,1,3072] copy(%rhs),
sharding={devices=[1,1,1,4,8]0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,24,25
,26,27,28,29,30,31,8,9,10,11,12,13,14,15 last_tile_dim_replicate}
ROOT %conv = f32[64,1,1,3072] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=3072,
window={size=3x1},
sharding={devices=[8,1,1,4]0,16,24,8,2,18,26,10,4,20,28,12,6,22,30,14,7,23,
31,15,5,21,29,13,3,19,27,11,1,17,25,9}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 32));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::DynamicSlice(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(),
op::Constant(), op::Constant(),
op::Reshape())),
op::Reshape(), op::Constant(), op::Constant(), op::Constant()),
op::Shape("f32[8,3,1,768]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[3,1,1,768]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[8,1,1,768]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvWithFeatureGroupCountAlignWithLHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,4,2]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,2,4]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicUpdateSlice(
_, op::CollectivePermute(conv), _, _, _)),
op::Shape("f32[5,4,8]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvWithFeatureGroupCountAlignWithRHSPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,4,2]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,2,4]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::DynamicUpdateSlice(
_, op::CollectivePermute(conv), _, _, _)),
op::Shape("f32[5,4,8]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvWithFeatureGroupCountAlignWithOutputPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[4,275,16]{2,1,0} parameter(0)
%multiply.5810 = f32[4,275,16]{2,1,0} copy(lhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
%rhs = f32[1,275,16]{2,1,0} parameter(1)
%copy.25 = f32[1,275,16]{2,1,0} copy(rhs), sharding={devices=[1,1,2,4]<=[8] last_tile_dim_replicate}
ROOT %convolution.6144 = f32[5,4,16]{2,1,0} convolution(multiply.5810, copy.25), window={size=275 pad=2_2},
dim_labels=b0f_i0o->0bf, feature_group_count=16,
operand_precision={HIGH,HIGH}, sharding={devices=[1,1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(op::Shape("f32[4,275,4]"));
const auto rhs = AllOf(op::Shape("f32[1,275,4]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, rhs), op::Shape("f32[5,4,4]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvWithFeatureGroupCountRHSAlignWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[2,1,1,1]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[3,1,1,1024]"));
auto resharded_rhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(rhs))))),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, resharded_rhs),
op::Shape("f32[16,801,1,512]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvWithFeatureGroupCountLHSAlignWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, rhs),
op::Shape("f32[16,801,1,512]")));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignOutputWithLHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
auto conv = AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root,
AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(conv)))),
op::Shape("f32[8,801,1,1024]")));
}
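// The next cases also shard a spatial dimension of the LHS, so in addition to
// grouping on the feature dimension the partitioner has to exchange left and
// right halos (collective-permutes of slices) before the local convolution.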
TEST_P(SpmdPartitioningTest,
PartitionConvGroupOnFeatureGroupCount_RHSPartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(_, op::Concatenate(left_halo, lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvGroupOnFeatureGroupCount_RHSAlignWithOutput) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[5,1,1,1024] parameter(1), sharding={replicated}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs =
AllOf(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape()),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(op::Convolution(
op::Select(_, op::Concatenate(left_halo, lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
TEST_P(SpmdPartitioningTest,
PartitionConvGroupOnFeatureGroupCount_LHSAlignWithOutput) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[2,1,1,1,2]<=[4] last_tile_dim_replicate}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Reshape(), op::Constant(),
op::Constant(), op::Constant())),
op::Shape("f32[8,801,1,1024]"));
auto resharded_lhs =
AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(
op::Pad(op::DynamicSlice(lhs, op::Subtract(), op::Subtract(),
op::Subtract(), op::Subtract()),
op::Constant()))))),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(resharded_lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(resharded_lhs)));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(
root,
AllOf(
op::Convolution(
op::Select(
_, op::Concatenate(left_halo, resharded_lhs, right_halo), _),
rhs),
op::Shape("f32[16, 401, 1, 512]")));
}
TEST_P(SpmdPartitioningTest, PartitionConvGroupOnBatchGroupCount) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[16,801,1,1024] parameter(1)
%rhs.copy = f32[16,801,1,1024] copy(%rhs),
sharding={devices=[1,2,1,2]<=[4]}
ROOT %conv = f32[5,1,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=f01b_i01o->01bf,batch_group_count=1024,
window={size=801x1 pad=2_2x0_0},
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Select(_,
op::Copy(op::DynamicSlice(
op::Pad(op::Parameter(), op::Constant()), op::Constant(),
op::Reshape(), op::Constant(), op::Reshape())),
_),
op::Shape("f32[16,401,1,512]"));
auto left_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
auto right_halo = AllOf(op::Shape("f32[16,2, 1, 512]"),
op::CollectivePermute(op::Slice(lhs)));
const auto rhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,401,1,512]"));
auto conv = AllOf(op::Convolution(op::Concatenate(left_halo, lhs, right_halo),
op::Select(_, rhs, _)),
op::Shape("f32[5,1,1,512]"));
EXPECT_THAT(root, AllOf(op::CollectivePermute(op::AllReduce(conv)),
op::Shape("f32[5,1,1,512]")));
}
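// Partitioning a batch_group_count convolution with fully replicated operands
// and output should simply succeed; only the partitioning status is checked
// here, not the emitted HLO structure.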
TEST_P(SpmdPartitioningTest, PartitionConvWithBatchGroupCountReplicatedLHSRHS) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(f32[8,28,1,64]{3,2,1,0}, f32[8,28,1,2]{3,2,1,0})->f32[3,1,32,2]{3,2,1,0}}, allow_spmd_sharding_propagation_to_output={true}
ENTRY main.4 {
lhs = f32[8,28,1,64]{3,2,1,0} parameter(0), sharding={replicated}
rhs = f32[8,28,1,2]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = f32[3,1,32,2]{3,2,1,0} convolution(lhs, rhs), window={size=28x1 pad=1_1x0_0}, dim_labels=f01b_i01o->01bf, batch_group_count=2, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
}
TEST_P(SpmdPartitioningTest,
       PartitionConvWithFeatureGroupCountAlignOutputWithRHS) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,2,1,1]0,1}
%rhs = f32[5,1,1,1024] parameter(1)
%rhs.copy = f32[5,1,1,1024] copy(%rhs),
sharding={devices=[1,1,1,2]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01io->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0},
sharding={devices=[2,1,1,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs =
AllOf(op::Copy(op::DynamicSlice(op::Pad(op::Parameter(), op::Constant()),
op::Constant(), op::Reshape(),
op::Constant(), op::Constant())),
op::Shape("f32[16,401,1,1024]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[5,1,1,512]"));
auto resharded_lhs = AllOf(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(lhs))))),
op::Shape("f32[16,801,1,512]"));
auto conv = AllOf(op::Convolution(resharded_lhs, rhs),
op::Shape("f32[16,801,1,512]"));
EXPECT_THAT(root,
AllOf(op::Reshape(op::Transpose(op::AllToAll(op::Reshape(conv)))),
op::Shape("f32[8,801,1,1024]")));
}
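// Backprop-filter style grouped convolution (dim_labels=b01f_01oi->b01f with
// rhs_reversal): both operands are sharded on a feature dimension, so the
// grouped convolution is expected to partition without collectives.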
TEST_P(SpmdPartitioningTest, PartitionConvWithFeatureGroupCountBackProp) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[16,801,1,1024] parameter(0)
%lhs.copy = f32[16,801,1,1024] copy(%lhs),
sharding={devices=[1,1,1,2]0,1}
%rhs = f32[5,1,1024,1] parameter(1)
%rhs.copy = f32[5,1,1024,1] copy(%rhs),
sharding={devices=[1,1,2,1]0,1}
ROOT %conv = f32[16,801,1,1024] convolution(%lhs.copy, %rhs.copy),
dim_labels=b01f_01oi->b01f,feature_group_count=1024,
window={size=5x1 pad=2_2x0_0 rhs_reversal=1x1},
sharding={devices=[1,1,1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[16,801,1,512]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[5,1,512,1]"));
EXPECT_THAT(root,
AllOf(op::Convolution(lhs, rhs), op::Shape("f32[16,801,1,512]")));
}
TEST_P(SpmdPartitioningTest, NoReshardOnBroadcastDims) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[2,3] parameter(0)
%param1 = f32[2,3,20] parameter(1)
%br0 = f32[20,2,20,3,20] broadcast(%param0), dimensions={1,3}, sharding={devices=[2,1,2,1,2]<=[8]}
%br1 = f32[20,2,20,3,20] broadcast(%param1), dimensions={1,3,4}, sharding={devices=[2,1,2,1,2]<=[8]}
%add = f32[20,2,20,3,20] add(%br0, %br1), sharding={devices=[2,1,2,1,2]<=[8]}
%reshape = f32[10,4,10,6,20] reshape(%br0), sharding={devices=[2,1,2,1,2]<=[8]}
%transpose = f32[2,3,20,20,20] transpose(%br0), dimensions={1,3,0,2,4}, sharding={devices=[1,1,2,2,2]<=[8]}
%copy_add0 = f32[20,2,20,3,20] copy(%add), sharding={devices=[2,1,2,1,2]6,7,2,3,4,5,0,1}
%copy_add1 = f32[20,2,20,3,20] copy(%add), sharding={devices=[2,1,2,1,2]7,6,3,2,5,4,0,1}
%copy_reshape = f32[10,4,10,6,20] copy(%reshape), sharding={devices=[2,1,2,1,2]7,6,3,2,5,4,0,1}
%copy_transpose = f32[2,3,20,20,20] copy(%transpose), sharding={devices=[1,1,2,2,2]7,6,3,2,5,4,0,1}
ROOT %tuple = (f32[20,2,20,3,20], f32[20,2,20,3,20], f32[10,4,10,6,20], f32[2,3,20,20,20])
tuple(%copy_add0, %copy_add1, %copy_reshape, %copy_transpose),
sharding={{devices=[2,1,2,1,2]6,7,2,3,4,5,0,1},{devices=[2,1,2,1,2]7,6,3,2,5,4,0,1},{devices=[2,1,2,1,2]7,6,3,2,5,4,0,1},{devices=[1,1,2,2,2]7,6,3,2,5,4,0,1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto copy_add0 =
op::Copy(op::Copy(op::Add(op::Broadcast(_), op::Broadcast(_))));
auto copy_add1 = op::Copy(
op::CollectivePermute(op::Add(op::Broadcast(_), op::Broadcast(_))));
auto copy_reshape = op::Copy(op::Copy(op::Reshape(op::Broadcast(_))));
auto copy_transpose = op::Copy(op::Copy(op::Transpose(op::Broadcast(_))));
EXPECT_THAT(root,
op::Tuple(copy_add0, copy_add1, copy_reshape, copy_transpose));
}
TEST_P(SpmdPartitioningTest,
ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,112,112,12] parameter(0)
%lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs),
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
%rhs = f32[7,7,12,64] parameter(1)
%rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs),
sharding={devices=[1,1,2,2]<=[4]}
ROOT %conv = f32[128,56,56,64] convolution(
f32[128,112,112,12] %lhs.copy,
f32[7,7,12,64] %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[128,112,112,6]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Reshape(), op::Reshape())),
op::Shape("f32[7,7,6,32]"));
EXPECT_THAT(
root,
AllOf(op::CollectivePermute(op::AllReduce(op::Convolution(lhs, rhs))),
op::Shape("f32[128,56,56,32]")));
}
TEST_P(SpmdPartitioningTest,
ConvolutionInputKernelNonContractingDimPartialReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,56,56,256] parameter(0)
%lhs.copy = f32[128,56,56,256] copy(%lhs),
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
%rhs = f32[128,28,28,512] parameter(1)
%rhs.copy = f32[128,28,28,512] copy(%rhs),
sharding={devices=[1,1,1,2,2]<=[4] last_tile_dim_replicate}
ROOT %conv = f32[1,1,256,512] convolution(%lhs.copy, %rhs.copy),
window={size=28x28 pad=0_-1x0_-1 rhs_dilate=2x2}, dim_labels=f01b_i01o->01bf,
sharding={devices=[1,1,2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[128,56,56,128]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Constant(), op::Reshape())),
op::Shape("f32[128,28,28,256]"));
EXPECT_THAT(root, AllOf(op::Convolution(lhs, op::CollectivePermute(rhs)),
op::Shape("f32[1,1,128,256]")));
}
TEST_P(SpmdPartitioningTest,
       ConvolutionInputSpatialDimAndFeatureDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[8,210,210,12] parameter(0)
%lhs.copy = f32[8,210,210,12] copy(f32[8,210,210,12] %lhs),
sharding={devices=[1,2,1,2]<=[4]}
%rhs = f32[3,3,12,32] parameter(1)
%rhs.copy = f32[3,3,12,32] copy(f32[3,3,12,32] %rhs),
sharding={devices=[1,1,2,1,2]<=[4] last_tile_dim_replicate}
ROOT %conv = f32[8,210,210,32] convolution(
f32[8,210,210,12] %lhs.copy,
f32[3,3,12,32] %rhs.copy),
window={size=3x3 pad=1_1x1_1},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
const auto lhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Reshape(),
op::Constant(), op::Reshape())),
op::Shape("f32[8,105,210,6]"));
auto left_halo =
AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,1,210,6]"));
auto right_halo =
AllOf(op::CollectivePermute(op::Slice(lhs)), op::Shape("f32[8,1,210,6]"));
auto exchanged_lhs = AllOf(
op::Select(op::And(_, _), op::Concatenate(left_halo, lhs, right_halo),
op::Broadcast(_)),
op::Shape("f32[8,107,210,6]"));
const auto rhs = AllOf(
op::Copy(op::DynamicSlice(op::Parameter(), op::Constant(), op::Constant(),
op::Reshape(), op::Constant())),
op::Shape("f32[3,3,6,32]"));
EXPECT_THAT(root, AllOf(op::AllReduce(op::Convolution(
exchanged_lhs, op::CollectivePermute(rhs))),
op::Shape("f32[8,105,210,32]")));
}
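// 1D FFT of length 6 with the transform dimension split across 2 devices.
// The expected lowering pads and redistributes the input (a dot followed by
// an all-to-all), runs an FFT on each shard, and completes the cross-shard
// phase inside a while loop.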
TEST_P(SpmdPartitioningTest, Fft3D) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = c64[1,1,6]
constant({{{(0,0),(1,1),(2,2),(3,3),(4,4),(5,5)}}}),
sharding={devices=[1,1,2]0,1}
ROOT fft = c64[1,1,6] fft(c64[1,1,6] constant), fft_type=FFT, fft_length={6},
sharding={devices=[1,1,2]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto input = AllOf(op::DynamicSlice(op::Constant(), op::Constant(),
op::Constant(), op::Reshape()),
op::Shape("c64[1,1,3]"));
auto padded_input =
AllOf(op::DynamicSlice(
op::Concatenate(input, op::CollectivePermute(op::Slice())),
op::Constant(), op::Constant(), op::Reshape()),
op::Shape("c64[1,1,4]"));
auto shuffled_input =
AllOf(op::Slice(op::AllToAll(op::Dot(padded_input, op::Convert()))),
op::Shape("c64[1,1,3]"));
auto local_fft = AllOf(op::Fft(shuffled_input), op::Shape("c64[1,1,3]"));
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(op::Tuple(
_, op::Multiply(local_fft, op::Exp()), _, _, _))),
op::Shape("c64[1,1,3]")));
}
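// Both convolution operands are the same [2,4]-sharded parameter, but they
// play different roles (dim_labels=bf_io->bf), so each use is resharded
// independently (all-reduce of a dynamic-update-slice) before the local op.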
TEST_P(SpmdPartitioningTest, DotInputsAreIdentical) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.1 = f32[4000,4000]{1,0} parameter(0), sharding={devices=[2,4]<=[8]}
ROOT %convolution = f32[4000,4000]{1,0} convolution(
f32[4000,4000]{1,0} %parameter.1, f32[4000,4000]{1,0} %parameter.1),
dim_labels=bf_io->bf, sharding={devices=[2,4]<=[8]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param = AllOf(op::Parameter(), op::Shape("f32[2000, 1000]"));
auto resharded_lhs =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, param, _, _)),
op::Shape("f32[2000, 4000]"));
auto resharded_rhs =
AllOf(op::AllReduce(op::DynamicUpdateSlice(_, op::Copy(param), _, _)),
op::Shape("f32[4000, 1000]"));
EXPECT_THAT(root, AllOf(op::Convolution(resharded_lhs, resharded_rhs),
op::Shape("f32[2000, 1000]")));
}
TEST_P(SpmdPartitioningTest, ConstantSliceReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%constant.785 = f32[1,8] constant({{0,1,2,3,4,5,6,7}}),
sharding={devices=[1,8]<=[8]}
%slice.62 = f32[1,1] slice(%constant.785), slice={[0:1], [0:1]},
sharding={devices=[1,8]<=[8]}
ROOT %reshape.779 = f32[] reshape(%slice.62), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto slice = AllOf(op::Shape("f32[1,1]"),
op::Copy(op::DynamicSlice(op::Constant(), _, _)));
EXPECT_THAT(root, op::Reshape(op::AllReduce(op::Select(_, slice, _))));
}
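// The gather tests below exercise the "parallel dimension" path: one index
// component is an iota over an operand dimension, so that dimension can be
// partitioned jointly with the matching index dimension. The variants differ
// in how the operand and indices have to be redistributed first.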
TEST_P(SpmdPartitioningTest, GatherParallelDimRedistributionOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,2,1,1]<=[8]}
%constant = s32[4] constant({0, 1, 2, 3}), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} broadcast(%constant), dimensions={2},
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Reshape());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimRedistributionIndices) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,2]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,2]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,2]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Reshape());
auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,2,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimReplicatedIndices) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={replicated}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={replicated}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={replicated}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimReplicatedOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimPartialReplicatedIndices) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimPartialReplicatedOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimSwappedDimensions) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
devices=[4,2,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,4]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,2,4]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,2,4]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute());
auto indices = AllOf(op::Shape("s32[2,4,1]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[4,1,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimFromOutsideWhilePositive) {
absl::string_view hlo_string = R"(
HloModule module
cond {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
%counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
%constant = s32[] constant(3), sharding={replicated}
ROOT %lt = pred[] compare(counter, constant), direction=LT,
sharding={replicated}
}
body {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
%parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
sharding={replicated}
%iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
%constant = s32[] constant(1), sharding={replicated}
%updated_counter = s32[] add(counter, constant), sharding={replicated}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[])
tuple(gather.20, iota, updated_counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
}
ENTRY entry {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] constant(0), sharding={replicated}
%tuple = (s32[8,4,2,2], s32[1,8,4], s32[]) tuple(parameter.0, iota, counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[]) while(tuple), body=body,
condition=cond,
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)), _,
_));
}
TEST_P(SpmdPartitioningTest, GatherParallelDimFromOutsideWhileNegative) {
absl::string_view hlo_string = R"(
HloModule module
cond {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
%counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
%constant = s32[] constant(3), sharding={replicated}
ROOT %lt = pred[] compare(counter, constant), direction=LT,
sharding={replicated}
}
body {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
%parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
sharding={replicated}
%iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] get-tuple-element(parameters), index=2, sharding={replicated}
%constant = s32[] constant(1), sharding={replicated}
%updated_counter = s32[] add(counter, constant), sharding={replicated}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
%iota.2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[])
tuple(gather.20, iota.2, updated_counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
}
ENTRY entry {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] constant(0), sharding={replicated}
%tuple = (s32[8,4,2,2], s32[1,8,4], s32[]) tuple(parameter.0, iota, counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[]) while(tuple), body=body,
condition=cond,
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[8,4,2,2]"), op::GetTupleElement());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Concatenate());
auto gather = AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)), _,
_));
}
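// Scatter case from the test name: the replication factor on the last tile
// dimension does not divide the partition groups evenly. Only the per-shard
// shapes are checked: operand and result tiled to [1,12,2048,16], with
// indices and updates kept at their full shapes.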
TEST_P(SpmdPartitioningTest, ScatterRepsOnLastTileDimDontDivideGroups) {
absl::string_view hlo_string = R"(
HloModule module
region.1 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT res.1 = f32[] add(lhs, rhs)
}
ENTRY entry {
%add.1 = f32[8,96,2048,16]{3,2,1,0} parameter(0)
%concatenate.1 = s32[8,96,2048,2,4]{4,3,2,1,0} parameter(1)
%broadcast.1 = f32[8,96,2048,2]{3,2,1,0} parameter(2)
%add.1.shard = f32[8,96,2048,16]{3,2,1,0} copy(%add.1), sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
%concatenate.1.shard = s32[8,96,2048,2,4]{4,3,2,1,0} copy(%concatenate.1), sharding={devices=[8,8,1,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
%broadcast.1.shard = f32[8,96,2048,2]{3,2,1,0} copy(%broadcast.1), sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
ROOT %scatter.44 = f32[8,96,2048,16]{3,2,1,0} scatter(
%add.1.shard,
%concatenate.1.shard,
%broadcast.1.shard),
update_window_dims={},
inserted_window_dims={0,1,2,3},
scatter_dims_to_operand_dims={0,1,2,3},
index_vector_dim=4,
to_apply=region.1,
sharding={devices=[8,8,1,1,24]<=[8,8,24]T(1,0,2) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(hlo_string, 1536));
VLOG(1) << module->ToString();
{
const auto partitioned_scatter =
module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("f32[1,12,2048,16]"));
auto indices = AllOf(op::Shape("s32[8,96,2048,2,4]"));
auto update = AllOf(op::Shape("f32[8,96,2048,2]"));
auto scatter = AllOf(op::Shape("f32[1,12,2048,16]"),
op::Scatter(operand, indices, update));
EXPECT_THAT(partitioned_scatter, scatter);
}
}
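// Same parallel-dimension handling, but the iota index reaches the gather and
// scatter through a conditional; both branch computations are expected to be
// partitioned on the parallel dimension internally.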
TEST_P(SpmdPartitioningTest, ParallelDimFromOutsideConditionalPositive) {
absl::string_view hlo_string = R"(
HloModule module
gather_comp {
%parameters = (s32[8,4,2,2], s32[1,8,4]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}}
%parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
sharding={replicated}
%iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather.20), sharding={replicated}
}
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
scatter_comp {
%parameters = (s32[8,4,2,2], s32[1,8,4]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}}
%parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
sharding={replicated}
%iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%constant = s32[] constant(0)
%base = s32[8,4,2,2]{3,2,1,0} broadcast(constant), dimensions={},
sharding={replicated}
%scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %base,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.0),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter.20), sharding={replicated}
}
ENTRY entry {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] constant(0), sharding={replicated}
%tuple = (s32[8,4,2,2], s32[1,8,4]) tuple(parameter.0, iota),
sharding={{replicated}, {devices=[1,8,1]<=[8]}}
%parameter.1 = pred[] parameter(1)
ROOT conditional = s32[8,4,2,2] conditional(parameter.1, tuple, tuple),
true_computation=gather_comp, false_computation=scatter_comp,
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
{
const auto partitioned_gather = module->entry_computation()
->root_instruction()
->true_computation()
->root_instruction();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto gather =
AllOf(op::Shape("s32[1,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
partitioned_gather,
op::Copy(op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
{
const auto partitioned_scatter = module->entry_computation()
->root_instruction()
->false_computation()
->root_instruction();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(partitioned_scatter,
op::Copy(op::AllReduce(
op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
}
TEST_P(SpmdPartitioningTest, GatherParallelDimAndNonParallelDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]<=[4]}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]<=[4]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,2]<=[4]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %indices), dimensions={0},
sharding={devices=[1,2,2]<=[4]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[4,4,2,2]"), op::AllReduce());
auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[4,2,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
_, _, _, _)));
}
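// Regression test named after an internal bug (b/303520921): the operand is
// sharded on the dimension that feeds the collapsed slice, and the check only
// asserts the per-shard shapes of the inner gather via FindInstruction.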
TEST_P(SpmdPartitioningTest, Gather_b303520921) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%convert.303 = bf16[1000,16]{1,0} parameter(0), sharding={devices=[4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
%reshape.830 = s32[16,8,1]{2,1,0} parameter(1), sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
ROOT %gather.831 = bf16[16,8,16]{2,1,0} gather(convert.303, reshape.830),
offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,16}, sharding={devices=[2,1,4]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("bf16[250,16]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[8,8,1]"), op::Subtract());
auto gather = AllOf(op::Shape("bf16[8,8,16]"), op::Gather(operand, indices));
const HloInstruction* gather_inst = FindInstruction(module.get(), "gather");
EXPECT_NE(gather_inst, nullptr);
EXPECT_THAT(gather_inst, gather);
}
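// The "Merged*" gather cases below combine two sharding patterns at once
// (index-parallel, operand passthrough, trivially sliced operand, index
// passthrough) and check which combination of all-reduces and
// dynamic-update-slices is needed to reassemble the result.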
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndOperandPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Reshape());
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, gather, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndTrivialSlicedOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,2,1,1]<=[8]}
%parameter.1 = s32[1,8,1]{2,1,0} parameter(1),
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota = s32[1,8,1]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,1]{2,1,0} concatenate(
s32[1,8,1]{2,1,0} %parameter.1, s32[1,8,1]{2,1,0} %iota), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,1,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,1]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,2,1]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,1,2,2]"), op::Gather(operand, indices));
VLOG(1) << module->ToString();
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::Select(_, _, gather)), _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, GatherMergedIndexParallelAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
sharding={devices=[1,4,2]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,2]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
sharding={devices=[1,4,2]<=[8]}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,2,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
_, _, _, _)));
}
TEST_P(SpmdPartitioningTest,
GatherMergedOperandPassthroughAndTrivialSlicedOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={replicated}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[4,2,1,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,8,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[8,4,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::AllReduce(op::Select(_, _, gather))), _, _,
_, _)));
}
TEST_P(SpmdPartitioningTest,
GatherMergedOperandPassthroughAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,1,2,1,4]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,4,4]"), op::CollectivePermute());
auto gather = AllOf(op::Shape("s32[4,4,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
_, _, _, _)));
}
TEST_P(SpmdPartitioningTest,
GatherMergedOperandPassthroughAndIndexPassthrough_PartialGrouping) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::AllReduce());
auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Parameter());
auto gather = AllOf(op::Shape("s32[4,2,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::DynamicUpdateSlice(_, gather, _, _, _, _)),
_, _, _, _))));
}
TEST_P(SpmdPartitioningTest,
GatherMergedTrivialSlicedOperandAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[4,2,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,4,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[4,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(
root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::AllReduce(op::Select(_, _, gather))), _, _,
_, _)));
}
TEST_P(SpmdPartitioningTest,
GatherMergedTrivialSlicedOperandAndIndexPassthrough_PartialGrouping) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,2,2,2]"), op::AllReduce());
auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[4,2,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root,
op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::Select(_, _, gather)), _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, GatherTrivialSlicedOperandPartial) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY main.4 {
%arg.0 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
%arg.1 = s32[2]{0} parameter(1), sharding={replicated}
ROOT gather = s64[2,1]{1,0} gather(arg.0, arg.1), offset_dims={0,1},
collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={2,1}, indices_are_sorted=true, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s64[8,1]"), op::AllReduce());
auto indices = AllOf(op::Shape("s32[2]"), op::Subtract());
auto gather = AllOf(op::Shape("s64[2,1]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::AllReduce(op::Select(_, _, gather)));
}
TEST_P(SpmdPartitioningTest, GatherParallelIndexAndOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, gather);
}
TEST_P(SpmdPartitioningTest, GatherReshardParallelIndexAndOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]1,0,3,2,4,5,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,4,1,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::CollectivePermute(gather));
}
TEST_P(SpmdPartitioningTest, GatherParallelIndexAndOperandReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT %gather.20 = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2}, sharding={devices=[4,1,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Parameter(0));
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto gather = AllOf(op::Shape("s32[2,4,2,2]"), op::Gather(operand, indices));
EXPECT_THAT(root, op::DynamicSlice(gather, _, _, _, _));
}
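// Forces PartitioningMethod::kTrivialSlicedOperand: the gather is lowered with
// clamped/offset indices and a masking select, and no collective-permute is
// expected in the partitioned module.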
TEST_P(SpmdPartitioningTest,
GatherPartitionedOnTrivialSliceDimsForceTrivialSlice) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
%indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
ROOT %gather = f32[4,16,16] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,16}, sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(
hlo_string, 32, true, false, false,
false, -1, PartitioningMethod::kTrivialSlicedOperand));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(op::Select(_, _, op::Gather(_, _))));
EXPECT_THAT(root->operand(0)->operand(2)->operand(1),
op::Subtract(op::Clamp(_, op::Parameter(1), _), _));
auto clamp = FindInstruction(module.get(), HloOpcode::kClamp);
EXPECT_THAT(clamp->operand(1), op::Parameter(1));
auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
auto collective_permute =
FindInstruction(module.get(), HloOpcode::kCollectivePermute);
EXPECT_THAT(collective_permute, nullptr);
}
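// Same computation as above, but forcing PartitioningMethod::kIndexParallel;
// this path is expected to introduce a collective-permute and all-reduces.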
TEST_P(SpmdPartitioningTest,
GatherPartitionedOnTrivialSliceDimsForceIndexParallel) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
%indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
ROOT %gather = f32[4,16,16] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,16}, sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 32, true, false, false,
false, -1, PartitioningMethod::kIndexParallel));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::Select(_, _, op::Gather(op::AllReduce(_), _))),
_, _, _)));
auto gather = FindInstruction(module.get(), HloOpcode::kGather);
EXPECT_THAT(gather->operand(1),
op::Subtract(op::Clamp(_, op::Parameter(1), _), _));
auto collective_permute =
FindInstruction(module.get(), HloOpcode::kCollectivePermute);
EXPECT_NE(collective_permute, nullptr);
auto all_reduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_THAT(all_reduce->operand(0), op::DynamicUpdateSlice(_, _, _, _));
auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
}
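// Scatter tests with parallel (iota-derived) index dimensions, covering
// operand redistribution as well as replicated and partially replicated
// operands, indices, and updates.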
TEST_P(SpmdPartitioningTest, ScatterParallelDimRedistributionOperand) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,2,1,1]<=[8]}
%constant = s32[4] constant({0, 1, 2, 3}), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} broadcast(%constant), dimensions={2},
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Reshape());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedIndices) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={replicated}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={replicated}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={replicated}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedOperand) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimReplicatedUpdate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedIndices) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedOperand) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimPartialReplicatedUpdate) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={
devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root,
op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimSwappedDimensions) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={
devices=[4,2,1,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,4]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,2,4]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,2,4]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={
devices=[4,2,1,1]<=[8]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(hlo_string,
8));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute());
auto indices = AllOf(op::Shape("s32[2,4,1]"), op::Subtract());
auto update = AllOf(op::Shape("s32[4,1,2,2]"), op::CollectivePermute());
auto scatter =
AllOf(op::Shape("s32[4,1,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimFromOutsideWhilePositive) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
cond {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
%counter = s32[] get-tuple-element(parameters), index=3, sharding={replicated}
%constant = s32[] constant(3), sharding={replicated}
ROOT %lt = pred[] compare(counter, constant), direction=LT,
sharding={replicated}
}
body {
%parameters = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) parameter(0),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
%parameter.0 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=0,
sharding={replicated}
%iota = s32[1,8,4]{2,1,0} get-tuple-element(parameters), index=1,
sharding={devices=[1,8,1]<=[8]}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,8,1]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}, sharding={devices=[1,8,1]<=[8]}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} get-tuple-element(parameters), index=2,
sharding={replicated}
%counter = s32[] get-tuple-element(parameters), index=3, sharding={replicated}
%constant = s32[] constant(1), sharding={replicated}
%updated_counter = s32[] add(counter, constant), sharding={replicated}
%scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
ROOT %tuple = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[])
tuple(scatter.20, iota, parameter.1, updated_counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
}
ENTRY entry {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={replicated}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]<=[8]}
%counter = s32[] constant(0), sharding={replicated}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={replicated}
%tuple = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[])
tuple(parameter.0, iota, parameter.1, counter),
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
ROOT while = (s32[8,4,2,2], s32[1,8,4], s32[8,4,2,2], s32[]) while(tuple), body=body,
condition=cond,
sharding={{replicated}, {devices=[1,8,1]<=[8]}, {replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto indices = AllOf(op::Shape("s32[2,1,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[1,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[1,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(
root,
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(_, scatter, _, _, _, _)),
_, _, _));
}
TEST_P(SpmdPartitioningTest, ScatterParallelDimAndNonParallelDimPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]<=[4]}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,2,1,1]<=[4]}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]<=[4]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,2,2]<=[4]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %indices), dimensions={0},
sharding={devices=[1,2,2]<=[4]}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
const auto root = module->entry_computation()->root_instruction();
VLOG(1) << module->ToString();
auto operand = AllOf(op::Shape("s32[4,4,2,2]"));
auto indices = AllOf(op::Shape("s32[2,4,2]"));
auto update = AllOf(op::Shape("s32[4,2,2,2]"));
auto scatter =
AllOf(op::Shape("s32[4,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
_, op::DynamicSlice(op::AllReduce(scatter), _, _, _, _),
_, _, _, _))));
}
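// Regression test for b/356877097: scatter of scalar updates (empty
// update_window_dims) into an operand sharded on its second dimension.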
TEST_P(SpmdPartitioningTest, b_356877097) {
absl::string_view hlo_string = R"(
HloModule jit__init
region_0.16 {
Arg_0.17 = f32[] parameter(0)
ROOT Arg_1.18 = f32[] parameter(1)
}
ENTRY main.22 {
constant.5 = f32[] constant(0), sharding={replicated}
broadcast.3 = f32[16,16]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,8]<=[8]}
constant.3 = s32[8,1]{1,0} constant({ {0}, {2}, {5}, {7}, {8}, {10}, {13}, {15} }), sharding={devices=[8,1]<=[8]}
iota = s32[8,1]{1,0} iota(), iota_dimension=0, sharding={devices=[8,1]<=[8]}
concatenate.15 = s32[8,2]{1,0} concatenate(constant.3, iota), dimensions={1}, sharding={devices=[8,1]<=[8]}
constant.2 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[8]{0} broadcast(constant.2), dimensions={}, sharding={devices=[8]<=[8]}
ROOT scatter.19 = f32[16,16]{1,0} scatter(broadcast.3, concatenate.15, broadcast.1),
update_window_dims={}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1},
index_vector_dim=1, to_apply=region_0.16, sharding={devices=[1,8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("f32[16,2]"), op::Broadcast());
auto indices = AllOf(op::Shape("s32[8,2]"), op::Subtract());
auto update = AllOf(op::Shape("f32[8]"), op::AllReduce());
EXPECT_THAT(root, AllOf(op::Shape("f32[16,2]"),
op::Scatter(operand, indices, update)));
}
TEST_P(SpmdPartitioningTest, ScatterMergedIndexParallelAndOperandPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,1,2]"), op::Reshape());
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[2,4,1,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[2,4,1,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedIndexParallelAndTrivialSlicedOperand) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,2,1,1]<=[8]}
%parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,2,2,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,2,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[2,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[2,2,2,2]"), op::Scatter(operand, indices, update));
VLOG(1) << module->ToString();
EXPECT_THAT(root, op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, scatter, _, _, _, _))));
}
TEST_P(SpmdPartitioningTest, ScatterMergedIndexParallelAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[1,8,4]{2,1,0} parameter(1),
sharding={devices=[1,4,2]<=[8]}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,2]<=[8]}
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %parameter.1, s32[1,8,4]{2,1,0} %iota), dimensions={0},
sharding={devices=[1,4,2]<=[8]}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[2,4,2,2]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,2,2]"), op::Subtract());
auto update = AllOf(op::Shape("s32[2,2,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[2,4,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(scatter), _, _, _, _)));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedOperandPassthroughAndTrivialSlicedOperand) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={replicated}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[4,2,1,2]"), op::Parameter());
auto indices = AllOf(op::Shape("s32[2,8,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[8,4,1,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[4,2,1,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::AllReduce(
op::DynamicUpdateSlice(_, scatter, _, _, _, _)))));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedOperandPassthroughAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,1,2,1,4]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,4,4]"), op::CollectivePermute());
auto update = AllOf(op::Shape("s32[4,4,1,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[8,4,1,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(scatter), _, _, _, _)));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedOperandPassthroughAndIndexPassthrough_PartialGrouping) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]<=[8]}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,4,1,2]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Parameter());
auto update = AllOf(op::Shape("s32[4,2,1,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[8,4,1,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::AllReduce(scatter)), _, _, _, _)));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthrough) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[4,2,2,2]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,4,4]"), op::Subtract());
auto update = AllOf(op::Shape("s32[4,4,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[4,2,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(scatter), _, _, _, _))));
}
TEST_P(SpmdPartitioningTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthrough_PartialGrouping) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[2,2,1,1,2]<=[8] last_tile_dim_replicate}
%parameter.1 = s32[2,8,4]{2,1,0} parameter(1),
sharding={devices=[1,2,2,2]<=[8] last_tile_dim_replicate}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={replicated}
ROOT %scatter.20 = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %parameter.1,
s32[8,4,2,2]{3,2,1,0} %parameter.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s32[8,2,2,2]"), op::Select());
auto indices = AllOf(op::Shape("s32[2,4,2]"), op::Subtract());
auto update = AllOf(op::Shape("s32[4,2,2,2]"), op::DynamicSlice());
auto scatter =
AllOf(op::Shape("s32[8,2,2,2]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::DynamicUpdateSlice(
_, op::AllReduce(op::AllReduce(scatter)), _, _, _, _)));
}
TEST_P(SpmdPartitioningTest, ScatterTrivialSlicedOperandPartial) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s64[], rhs: s64[]) -> s64[] {
lhs = s64[] parameter(0)
rhs = s64[] parameter(1)
ROOT sum = s64[] add(lhs, rhs)
}
ENTRY main.4 {
%arg.0 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
%arg.1 = s32[2]{0} parameter(1), sharding={replicated}
%arg.2 = s64[2,1]{1,0} parameter(2), sharding={replicated}
ROOT scatter = s64[8,2]{1,0} scatter(arg.0, arg.1, arg.2),
to_apply=add,
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0, indices_are_sorted=true, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto operand = AllOf(op::Shape("s64[8,1]"), op::AllReduce());
auto indices = AllOf(op::Shape("s32[2]"), op::Subtract());
auto update = AllOf(op::Shape("s64[2,1]"), op::Parameter());
auto scatter =
AllOf(op::Shape("s64[8,1]"), op::Scatter(operand, indices, update));
EXPECT_THAT(root, op::AllReduce(op::AllReduce(op::DynamicUpdateSlice(
_, op::DynamicSlice(scatter, _, _), _, _))));
}
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDimsForceTrivialSlice) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,16] parameter(0), sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
%indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
%updates = f32[4,16,16] parameter(2), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
ROOT %scatter = f32[8,16] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2, sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, PartitionComputation(
hlo_string, 32, true, false, false,
false, -1, PartitioningMethod::kTrivialSlicedOperand));
VLOG(1) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::AllReduce(op::Scatter(op::Select(_, _, _),
op::Subtract(_, _), _)));
auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
auto collective_permute =
FindInstruction(module.get(), HloOpcode::kCollectivePermute);
EXPECT_THAT(collective_permute, nullptr);
}
TEST_P(SpmdPartitioningTest,
ScatterPartitionedOnTrivialSliceDimsForceIndexParallel) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,16] parameter(0), sharding={devices=[8,4]<=[4,8]T(1,0)}
%indices = s32[4,16,1] parameter(1), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
%updates = f32[4,16,16] parameter(2), sharding={devices=[4,1,1,8]<=[32] last_tile_dim_replicate}
ROOT %scatter = f32[8,16] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2, sharding={devices=[8,1,4]<=[4,8]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 32, true, false, false,
false, -1, PartitioningMethod::kIndexParallel));
VLOG(1) << module->ToString();
auto all_to_all = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(all_to_all, nullptr);
auto scatter = FindInstruction(module.get(), HloOpcode::kScatter);
EXPECT_THAT(scatter->operand(1), op::Subtract(op::Parameter(1), _));
auto collective_permute =
FindInstruction(module.get(), HloOpcode::kCollectivePermute);
EXPECT_NE(collective_permute, nullptr);
auto all_reduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_NE(all_reduce, nullptr);
auto dynamic_slice = FindInstruction(module.get(), HloOpcode::kDynamicSlice);
EXPECT_THAT(dynamic_slice->operand(1), op::PartitionId());
}
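// Sort feeding a top-k style slice where the input sharding covers the sort
// dimension; the partitioned sort is expected to keep the full 32128-wide sort
// dimension and be tiled over the leading dimensions instead.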
TEST_P(SpmdPartitioningTest, SortTopKNonSortDimension) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.42077 (p.0.lhs.42078: f32[],
p.0.rhs.42079: f32[], p.1.lhs.42080: s32[], p.1.rhs.42081: s32[]) -> pred[] {
%p.0.lhs.42078 = f32[] parameter(0)
%bitcast-convert.135 = s32[] bitcast-convert(f32[] %p.0.lhs.42078)
%constant.45054 = s32[] constant(0)
%compare.133 = pred[] compare(s32[] %bitcast-convert.135,
s32[] %constant.45054), direction=LT
%constant.45278 = u32[] constant(2147483647)
%bitcast-convert.136 = u32[] bitcast-convert(f32[] %p.0.lhs.42078)
%subtract.337 = u32[] subtract(u32[] %constant.45278,
u32[] %bitcast-convert.136)
%bitcast-convert.137 = s32[] bitcast-convert(u32[] %subtract.337)
%select.282 = s32[] select(pred[] %compare.133, s32[] %bitcast-convert.137,
s32[] %bitcast-convert.135)
%p.0.rhs.42079 = f32[] parameter(1)
%bitcast-convert.138 = s32[] bitcast-convert(f32[] %p.0.rhs.42079)
%compare.134 = pred[] compare(s32[] %bitcast-convert.138,
s32[] %constant.45054), direction=LT
%bitcast-convert.139 = u32[] bitcast-convert(f32[] %p.0.rhs.42079)
%subtract.338 = u32[] subtract(u32[] %constant.45278,
u32[] %bitcast-convert.139)
%bitcast-convert.140 = s32[] bitcast-convert(u32[] %subtract.338)
%select.283 = s32[] select(pred[] %compare.134, s32[] %bitcast-convert.140,
s32[] %bitcast-convert.138)
%compare.135 = pred[] compare(s32[] %select.282,
s32[] %select.283), direction=GT
%compare.428 = pred[] compare(s32[] %select.283,
s32[] %select.282), direction=GT
%compare.429 = pred[] compare(pred[] %compare.135,
pred[] %compare.428), direction=EQ
%p.1.lhs.42080 = s32[] parameter(2)
%p.1.rhs.42081 = s32[] parameter(3)
%compare.430 = pred[] compare(s32[] %p.1.lhs.42080,
s32[] %p.1.rhs.42081), direction=LT
ROOT %select.579 = pred[] select(pred[] %compare.429,
pred[] %compare.430, pred[] %compare.135)
}
ENTRY %module {
%parameter.0 = f32[2,64,32128]{2,1,0} parameter(0),
sharding={devices=[2,1,4]<=[8]}
%iota = s32[2,64,32128]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[2,1,4]<=[8]}
%sort.18 = (f32[2,64,32128]{2,1,0}, s32[2,64,32128]{2,1,0}) sort(
f32[2,64,32128]{2,1,0} %parameter.0, s32[2,64,32128]{2,1,0} %iota),
dimensions={2}, is_stable=true, to_apply=%compare-greater-than.42077,
sharding={{devices=[2,1,4]<=[8]}, {devices=[2,1,4]<=[8]}}
output = f32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=0,
sharding={devices=[2,1,4]<=[8]}
%slice.0 = f32[2,64,2]{2,1,0} slice(f32[2,64,32128]{2,1,0} output),
slice={[0:2], [0:64], [0:2]}, sharding={devices=[2,1,4]<=[8]}
output2 = s32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=1,
sharding={replicated}
%slice.1 = s32[2,64,2]{2,1,0} slice(s32[2,64,32128]{2,1,0} output2),
slice={[0:2], [0:64], [0:2]}, sharding={devices=[2,1,4]<=[8]}
ROOT output.t = (f32[2,64,2]{2,1,0},
s32[2,64,2]{2,1,0}) tuple(slice.0, slice.1),
sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const HloInstruction* sort = FindInstruction(module.get(), "sort.0");
EXPECT_NE(sort, nullptr);
auto sort_match =
AllOf(op::Shape("(f32[1,16,32128], s32[1,16,32128])"), op::Sort(_, _));
EXPECT_THAT(sort, sort_match);
}
TEST_P(SpmdPartitioningTest, SortTopKPropagateBaseShape) {
absl::string_view hlo_string = R"(
HloModule module
%compare-greater-than.42077 (p.0.lhs.42078: f32[],
p.0.rhs.42079: f32[], p.1.lhs.42080: s32[], p.1.rhs.42081: s32[]) -> pred[] {
%p.0.lhs.42078 = f32[] parameter(0)
%bitcast-convert.135 = s32[] bitcast-convert(f32[] %p.0.lhs.42078)
%constant.45054 = s32[] constant(0)
%compare.133 = pred[] compare(s32[] %bitcast-convert.135,
s32[] %constant.45054), direction=LT
%constant.45278 = u32[] constant(2147483647)
%bitcast-convert.136 = u32[] bitcast-convert(f32[] %p.0.lhs.42078)
%subtract.337 = u32[] subtract(u32[] %constant.45278,
u32[] %bitcast-convert.136)
%bitcast-convert.137 = s32[] bitcast-convert(u32[] %subtract.337)
%select.282 = s32[] select(pred[] %compare.133, s32[] %bitcast-convert.137,
s32[] %bitcast-convert.135)
%p.0.rhs.42079 = f32[] parameter(1)
%bitcast-convert.138 = s32[] bitcast-convert(f32[] %p.0.rhs.42079)
%compare.134 = pred[] compare(s32[] %bitcast-convert.138,
s32[] %constant.45054), direction=LT
%bitcast-convert.139 = u32[] bitcast-convert(f32[] %p.0.rhs.42079)
%subtract.338 = u32[] subtract(u32[] %constant.45278,
u32[] %bitcast-convert.139)
%bitcast-convert.140 = s32[] bitcast-convert(u32[] %subtract.338)
%select.283 = s32[] select(pred[] %compare.134, s32[] %bitcast-convert.140,
s32[] %bitcast-convert.138)
%compare.135 = pred[] compare(s32[] %select.282,
s32[] %select.283), direction=GT
%compare.428 = pred[] compare(s32[] %select.283,
s32[] %select.282), direction=GT
%compare.429 = pred[] compare(pred[] %compare.135,
pred[] %compare.428), direction=EQ
%p.1.lhs.42080 = s32[] parameter(2)
%p.1.rhs.42081 = s32[] parameter(3)
%compare.430 = pred[] compare(s32[] %p.1.lhs.42080,
s32[] %p.1.rhs.42081), direction=LT
ROOT %select.579 = pred[] select(pred[] %compare.429,
pred[] %compare.430, pred[] %compare.135)
}
ENTRY %module {
%parameter.0 = f32[2,64,32128]{2,1,0} parameter(0),
sharding={devices=[1,1,8]<=[8]}
%iota = s32[2,64,32128]{2,1,0} iota(), iota_dimension=2,
sharding={devices=[1,1,8]<=[8]}
%sort.18 = (f32[2,64,32128]{2,1,0}, s32[2,64,32128]{2,1,0}) sort(
f32[2,64,32128]{2,1,0} %parameter.0, s32[2,64,32128]{2,1,0} %iota),
dimensions={2}, is_stable=true, to_apply=%compare-greater-than.42077,
sharding={{devices=[1,1,8]<=[8]}, {devices=[1,1,8]<=[8]}}
output = f32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=0,
sharding={devices=[1,1,8]<=[8]}
%slice.0 = f32[2,64,2]{2,1,0} slice(f32[2,64,32128]{2,1,0} output),
slice={[0:2], [0:64], [0:2]}, sharding={devices=[1,1,8]<=[8]}
output2 = s32[2,64,32128]{2,1,0} get-tuple-element(%sort.18), index=1,
sharding={replicated}
%slice.1 = s32[2,64,2]{2,1,0} slice(s32[2,64,32128]{2,1,0} output2),
slice={[0:2], [0:64], [0:2]}, sharding={devices=[1,1,8]<=[8]}
ROOT output.t = (f32[2,64,2]{2,1,0},
s32[2,64,2]{2,1,0}) tuple(slice.0, slice.1),
sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const HloInstruction* root = module->entry_computation()->root_instruction();
auto all_reduce_val =
AllOf(op::Shape("f32[2,64,2]"),
op::AllReduce(op::DynamicUpdateSlice(_, _, _, _, _)));
auto all_reduce_idx =
AllOf(op::Shape("s32[2,64,2]"),
op::AllReduce(op::DynamicUpdateSlice(_, _, _, _, _)));
auto tuple = AllOf(op::Shape("(f32[2,64,2], s32[2,64,2])"),
op::Tuple(all_reduce_val, all_reduce_idx));
EXPECT_THAT(root, tuple);
}
TEST_P(SpmdPartitioningTest, GatherIndexOnlyCorrectReplacement) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[1,8,6,6]{3,2,1,0} parameter(0),
sharding={replicated}
%parameter.1 = s32[2,4]{1,0} parameter(1),
sharding={devices=[2,1,4]<=[8] last_tile_dim_replicate}
%gather.100 = bf16[2,1,8,1,6]{4,3,2,1,0} gather(
bf16[1,8,6,6]{3,2,1,0} %parameter.0, s32[2,4]{1,0} %parameter.1),
offset_dims={1,2,3,4}, collapsed_slice_dims={}, start_index_map={0,1,2,3},
index_vector_dim=1, slice_sizes={1,8,1,6},
sharding={devices=[2,1,4,1,1]<=[8]}
%constant.45590 = s32[] constant(0), sharding={replicated}
%broadcast.54515 = s32[2,64,1,1]{3,2,1,0} broadcast(s32[] %constant.45590),
dimensions={},
sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}
ROOT %reshape.4243 = bf16[2,8,6]{2,1,0} reshape(
bf16[2,1,8,1,6]{4,3,2,1,0} %gather.100),
sharding={devices=[2,4,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const HloInstruction* root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Shape("bf16[1,8,6,6]"), op::Parameter());
auto param1 = AllOf(op::Shape("s32[1,4]"), op::Parameter());
auto reshape = AllOf(
op::Shape("bf16[1,2,6]"),
op::Reshape(op::DynamicSlice(op::Gather(param0, param1), _, _, _, _, _)));
EXPECT_THAT(root, reshape);
}
TEST_P(SpmdPartitioningTest, GatherRegressionTest1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[1,4] parameter(0), sharding={devices=[1,8]<=[8]}
%iota.10 = s32[4]{0} iota(), iota_dimension=0, sharding={devices=[8]<=[8]}
ROOT %gather.44 = s32[1,4]{1,0} gather(%parameter.0, %iota.10),
offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1,
slice_sizes={1,1}, sharding={devices=[1,8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
const HloInstruction* root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Shape("s32[1,1]"), op::Parameter());
EXPECT_THAT(root, op::Gather(param0, _));
}
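// Windowed einsum with the option that favors memory footprint: the generated
// while loop is expected to run 4 iterations here, versus 2 in the companion
// test below that prefers fewer iterations.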
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferMemoryFootprint) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
%parameter.1 = bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} parameter(1),
sharding={devices=[2,2,1,2,1,1,1]<=[8]}
%convolution.3 = bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0}
convolution(bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} %parameter.0,
bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} %parameter.1),
window={size=1x4x176x4x4 pad=0_0x3_3x175_175x0_0x0_0
rhs_reversal=0x1x1x0x0}, dim_labels=0b34f12_34i12o0->0b12f34,
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
ROOT %reshape.3973 = bf16[128,1024,4,176,256]{4,3,2,1,0}
reshape(bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0} %convolution.3),
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 8,
true,
false));
const HloInstruction* while_inst = FindInstruction(module.get(), "while");
EXPECT_NE(while_inst, nullptr);
const HloComputation* cond_comp = while_inst->while_condition();
const HloInstruction* root = cond_comp->root_instruction();
EXPECT_THAT(root, op::Compare(_, op::Constant()));
const HloConstantInstruction* iterations =
Cast<HloConstantInstruction>(root->operand(1));
EXPECT_TRUE(iterations->literal().GetFirstInteger());
EXPECT_EQ(*iterations->literal().GetFirstInteger(), 4);
}
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferNumberIterations) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} parameter(0),
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
%parameter.1 = bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} parameter(1),
sharding={devices=[2,2,1,2,1,1,1]<=[8]}
%convolution.3 = bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0}
convolution(bf16[128,1024,4,4,1152,1,1]{6,5,4,3,2,1,0} %parameter.0,
bf16[4,4,1152,4,176,256,1]{6,5,4,3,2,1,0} %parameter.1),
window={size=1x4x176x4x4 pad=0_0x3_3x175_175x0_0x0_0
rhs_reversal=0x1x1x0x0}, dim_labels=0b34f12_34i12o0->0b12f34,
sharding={devices=[4,1,2,1,1,1,1]<=[8]}
ROOT %reshape.3973 = bf16[128,1024,4,176,256]{4,3,2,1,0}
reshape(bf16[128,1024,4,176,256,1,1]{6,5,4,3,2,1,0} %convolution.3),
sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 8,
true,
true));
const HloInstruction* while_inst = FindInstruction(module.get(), "while");
EXPECT_NE(while_inst, nullptr);
const HloComputation* cond_comp = while_inst->while_condition();
const HloInstruction* root = cond_comp->root_instruction();
EXPECT_THAT(root, op::Compare(_, op::Constant()));
const HloConstantInstruction* iterations =
Cast<HloConstantInstruction>(root->operand(1));
EXPECT_TRUE(iterations->literal().GetFirstInteger());
EXPECT_EQ(*iterations->literal().GetFirstInteger(), 2);
}
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferNumberIterations2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[512,1024,16,36,256]{4,3,2,1,0} parameter(0)
%lhs.copy = bf16[512,1024,16,36,256]{4,3,2,1,0} copy(%lhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%rhs = bf16[512,1024,16,4,288]{4,3,2,1,0} parameter(1)
%rhs.copy = bf16[512,1024,16,4,288]{4,3,2,1,0} copy(%rhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%reshape.2556 = bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} reshape(
bf16[512,1024,16,4,288]{4,3,2,1,0} %rhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%reshape.2570 = bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0}
reshape(bf16[512,1024,16,36,256]{4,3,2,1,0} %lhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%convolution.10 = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
convolution(bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0} %reshape.2570,
bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} %reshape.2556),
window={size=1x1x16x4x512 pad=0_0x0_0x15_15x3_3x0_0 rhs_reversal=0x0x1x1x0},
dim_labels=4f01b23_4i23o01->01b23f4,
sharding={devices=[4,1,1,4,2,1,1]<=[8,2,2]T(1,2,0)}
ROOT %output = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
copy(%convolution.10), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 32,
true,
true));
const HloInstruction* while_inst = FindInstruction(module.get(), "while");
EXPECT_NE(while_inst, nullptr);
const HloComputation* cond_comp = while_inst->while_condition();
const HloInstruction* root = cond_comp->root_instruction();
EXPECT_THAT(root, op::Compare(_, op::Constant()));
const HloConstantInstruction* iterations =
Cast<HloConstantInstruction>(root->operand(1));
EXPECT_TRUE(iterations->literal().GetFirstInteger());
EXPECT_EQ(*iterations->literal().GetFirstInteger(), 4);
}
TEST_P(SpmdPartitioningTest, WindowedEinsumPreferMemoryFootprint2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = bf16[512,1024,16,36,256]{4,3,2,1,0} parameter(0)
%lhs.copy = bf16[512,1024,16,36,256]{4,3,2,1,0} copy(%lhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%rhs = bf16[512,1024,16,4,288]{4,3,2,1,0} parameter(1)
%rhs.copy = bf16[512,1024,16,4,288]{4,3,2,1,0} copy(%rhs),
sharding={devices=[8,1,4,1,1]<=[32]}
%reshape.2556 = bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} reshape(
bf16[512,1024,16,4,288]{4,3,2,1,0} %rhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%reshape.2570 = bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0}
reshape(bf16[512,1024,16,36,256]{4,3,2,1,0} %lhs.copy), sharding={
devices=[8,1,4,1,1,1,1]<=[32]}
%convolution.10 = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
convolution(bf16[512,1024,16,36,256,1,1]{6,5,4,3,2,1,0} %reshape.2570,
bf16[512,1024,16,4,288,1,1]{6,5,4,3,2,1,0} %reshape.2556),
window={size=1x1x16x4x512 pad=0_0x0_0x15_15x3_3x0_0 rhs_reversal=0x0x1x1x0},
dim_labels=4f01b23_4i23o01->01b23f4,
sharding={devices=[4,1,1,4,2,1,1]<=[8,2,2]T(1,2,0)}
ROOT %output = bf16[16,36,256,16,4,288,1]{6,5,4,3,2,1,0}
copy(%convolution.10), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 32,
true,
false));
const HloInstruction* while_inst = FindInstruction(module.get(), "while");
EXPECT_NE(while_inst, nullptr);
const HloComputation* cond_comp = while_inst->while_condition();
const HloInstruction* root = cond_comp->root_instruction();
EXPECT_THAT(root, op::Compare(_, op::Constant()));
const HloConstantInstruction* iterations =
Cast<HloConstantInstruction>(root->operand(1));
EXPECT_TRUE(iterations->literal().GetFirstInteger());
EXPECT_EQ(*iterations->literal().GetFirstInteger(), 8);
}
TEST_P(SpmdPartitioningTest, ContractingPartitionDotOperandsSlicedWrong) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[8,2,15,4] parameter(0)
%lhs.copy = f32[8,2,15,4] copy(%lhs),
sharding={devices=[1,2,4,1]<=[8]}
%rhs = f32[2,15,4] parameter(1)
%rhs.copy = f32[2,15,4] copy(%rhs),
sharding={devices=[2,4,1]<=[8]}
%dot = f32[8,2,2] dot(%lhs.copy, %rhs.copy),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2},
operand_precision={HIGH,HIGH},
sharding={devices=[2,2,2]<=[8]}
ROOT %output = f32[8,2,2] copy(%dot), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 8,
true,
true));
const HloInstruction* dot_op = FindInstruction(module.get(), HloOpcode::kDot);
auto op1 = op::Shape("f32[4,2,4,4]");
auto op2 = op::Shape("f32[2,4,4]");
EXPECT_THAT(dot_op, op::Dot(op1, op2));
}
TEST_P(SpmdPartitioningTest, PartitionDotGroupOnBatchContractingReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,32,24,4096] parameter(0),
sharding={devices=[2,1,1,2]<=[4]}
%rhs = f32[32,4096,1024] parameter(1),
sharding={devices=[2,2,1]<=[4]}
ROOT %dot = f32[32,32,24,1024] dot(%lhs, %rhs),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={3}, rhs_contracting_dims={1},
sharding={devices=[1,2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 4,
true,
true));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto dot = AllOf(op::Shape("f32[16,32,24,1024]"),
op::Dot(op::Parameter(0), op::Parameter(1)));
auto reduce_scatter = AllOf(op::Shape("f32[16,32,24,512]"),
op::DynamicSlice(op::AllReduce(dot), _, _, _, _));
EXPECT_THAT(root, AllOf(op::Reshape(op::Transpose(
op::AllToAll(op::Reshape(reduce_scatter)))),
op::Shape("f32[32,16,24,512]")));
}
TEST_P(SpmdPartitioningTest, PartitionPassthroughScatterCorrectOutputSharding) {
absl::string_view hlo_string = R"(
HloModule module
%scatter_add (parameter.0: bf16[], parameter.1: bf16[]) -> bf16[] {
%parameter.0 = bf16[] parameter(0)
%parameter.1 = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %parameter.0, bf16[] %parameter.1)
}
ENTRY entry {
%operand = bf16[2,1024]{1,0} parameter(0),
sharding={devices=[1,2]0,1}
%indices = s32[8,512,1]{2,1,0} parameter(1),
sharding={replicated}
%updates = bf16[8,512,1024]{2,1,0} parameter(2),
sharding={devices=[1,1,2]0,1}
ROOT %scatter = bf16[2,1024]{1,0} scatter(bf16[2,1024]{1,0} %operand,
s32[8,512,1]{2,1,0} %indices,
bf16[8,512,1024]{2,1,0} %updates), update_window_dims={2},
inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
index_vector_dim=2, to_apply=%scatter_add,
sharding={devices=[1,2]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto scatter = AllOf(op::Shape("bf16[2,512]"), op::Scatter(_, _, _));
EXPECT_THAT(root, scatter);
}
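// Helper used by several tests below: returns true if a collective-permute is
// a no-op, i.e. it has no source-target pairs or every pair sends a device's
// data back to itself. The tests assert the partitioner never emits such
// degenerate collective-permutes.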
bool IsTrivialCollectivePermute(HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kCollectivePermute) {
return false;
}
if (hlo->source_target_pairs().empty()) {
return true;
}
return absl::c_all_of(hlo->source_target_pairs(),
[](const std::pair<int64_t, int64_t>& pair) {
return pair.first == pair.second;
});
}
TEST_P(SpmdPartitioningTest, CollectivePermuteSimplifyIdentity) {
absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%parameter.7 = f32[3,16] parameter(0), sharding={devices=[1,2]0,1}
%constant.7 = f32[] constant(0)
%pad.3 = f32[3,18] pad(f32[3,16] %parameter.7, f32[] %constant.7), padding=0_0x1_1, sharding={devices=[1,2]0,1}
%slice.8 = f32[3,16] slice(f32[3,18] %pad.3), slice={[0:3], [2:18]}, sharding={devices=[1,2]0,1}
%slice.9 = f32[3,2] slice(f32[3,18] %pad.3), slice={[0:3], [0:2]}, sharding={devices=[1,2]0,1}
ROOT %concatenate.6 = f32[3,18] concatenate(f32[3,16] %slice.8, f32[3,2] %slice.9), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
for (HloComputation* computation : module->computations()) {
for (HloInstruction* hlo : computation->instructions()) {
EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
}
}
}
TEST_P(SpmdPartitioningTest, CollectivePermuteSimplifyZero) {
absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%parameter = f32[3,16,16,16,16,132]{5,4,3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,1,1]0,1}
%slice = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter), slice={[0:3], [15:16], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
%c0 = f32[] constant(0)
ROOT %pad = f32[3,18,16,16,16,132]{5,4,3,2,1,0} pad(f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice, f32[] %c0), padding=0_0x0_17x0_0x0_0x0_0x0_0, sharding={devices=[1,2,1,1,1,1]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
for (HloComputation* computation : module->computations()) {
for (HloInstruction* hlo : computation->instructions()) {
EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
}
}
}
TEST_P(SpmdPartitioningTest, PadWithWrapPattern) {
absl::string_view hlo_string = R"(
HloModule xla_computation_apply_fn__4.61
ENTRY %xla_computation_apply_fn__4.61 (parameter.7: f32[3,16,16,16,16,132]) -> f32[3,18,16,16,16,132] {
%parameter.7 = f32[3,16,16,16,16,132]{5,4,3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,1,1]0,1}
%slice.2 = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7), slice={[0:3], [15:16], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
%slice.3 = f32[3,1,16,16,16,132]{5,4,3,2,1,0} slice(f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7), slice={[0:3], [0:1], [0:16], [0:16], [0:16], [0:132]}, sharding={devices=[1,2,1,1,1,1]0,1}
ROOT %concatenate.3 = f32[3,18,16,16,16,132]{5,4,3,2,1,0} concatenate(f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice.2, f32[3,16,16,16,16,132]{5,4,3,2,1,0} %parameter.7, f32[3,1,16,16,16,132]{5,4,3,2,1,0} %slice.3), dimensions={1}, sharding={devices=[1,2,1,1,1,1]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
for (HloComputation* computation : module->computations()) {
for (HloInstruction* hlo : computation->instructions()) {
EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce) << hlo->ToString();
}
}
}
TEST_P(SpmdPartitioningTest, PadWrapWithNegatePattern) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.1 = f32[1,18] parameter(0), sharding={devices=[1,2]0,1}
%slice.16 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [16:18]}, sharding={devices=[1,2]0,1}
%negate.2 = f32[1,2] negate(f32[1,2] %slice.16), sharding={devices=[1,2]0,1}
%slice.17 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [0:2]}, sharding={devices=[1,2]0,1}
%negate.3 = f32[1,2] negate(f32[1,2] %slice.17), sharding={devices=[1,2]0,1}
ROOT %concatenate.13 = f32[1,22] concatenate(f32[1,2] %negate.2, f32[1,18] %parameter.1, f32[1,2] %negate.3), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
for (HloComputation* computation : module->computations()) {
for (HloInstruction* hlo : computation->instructions()) {
EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce) << hlo->ToString();
}
}
}
TEST_P(SpmdPartitioningTest, PadWrapWithMultipleModifiersPattern) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%parameter.1 = f32[1,18] parameter(0), sharding={devices=[1,2]0,1}
%slice.16 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [16:18]}, sharding={devices=[1,2]0,1}
%mod0.16 = f32[1,2] rsqrt(f32[1,2] %slice.16), sharding={devices=[1,2]0,1}
%mod1.16 = f32[1,2] sine(f32[1,2] %mod0.16), sharding={devices=[1,2]0,1}
%slice.17 = f32[1,2] slice(f32[1,18] %parameter.1), slice={[0:1], [0:2]}, sharding={devices=[1,2]0,1}
%mod0.17 = f16[1,2] convert(f32[1,2] %slice.17), sharding={devices=[1,2]0,1}
%mod1.17 = f16[1,2] cosine(f16[1,2] %mod0.17), sharding={devices=[1,2]0,1}
%mod2.17 = f32[1,2] convert(f16[1,2] %mod1.17), sharding={devices=[1,2]0,1}
ROOT %concatenate.13 = f32[1,22] concatenate(f32[1,2] %mod1.16, f32[1,18] %parameter.1, f32[1,2] %mod2.17), dimensions={1}, sharding={devices=[1,2]0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
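// Check that the wrap-halo rewrite keeps each modifier chain intact per shard:
// sine must still consume rsqrt, and the f32 convert must still consume the
// f16 cosine, which in turn consumes the f16 convert.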
for (HloComputation* computation : module->computations()) {
for (HloInstruction* hlo : computation->instructions()) {
const HloOpcode op = hlo->opcode();
EXPECT_FALSE(IsTrivialCollectivePermute(hlo)) << hlo->ToString();
EXPECT_NE(op, HloOpcode::kAllReduce) << hlo->ToString();
if (hlo->operand_count() != 1) {
continue;
}
const PrimitiveType type = hlo->shape().element_type();
const HloOpcode child_op = hlo->operand(0)->opcode();
const PrimitiveType child_type = hlo->operand(0)->shape().element_type();
if (op == HloOpcode::kSin) {
EXPECT_EQ(child_op, HloOpcode::kRsqrt);
} else if (op == HloOpcode::kConvert && type == F32) {
EXPECT_EQ(child_op, HloOpcode::kCos);
EXPECT_EQ(child_type, F16);
} else if (op == HloOpcode::kCos) {
EXPECT_EQ(child_op, HloOpcode::kConvert);
EXPECT_EQ(child_type, F16);
}
}
}
}
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,1] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[1,1] copy(%param0), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(op::Select(_, param0, _))),
op::Shape("f32[1,1]")));
}
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,2] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[1,2] copy(%param0), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
auto broadcast =
AllOf(op::AllReduce(op::Select(_, param0, _)), op::Shape("f32[1,1]"));
EXPECT_THAT(
root,
AllOf(op::Copy(op::AllReduce(op::DynamicUpdateSlice(_, broadcast, _, _))),
op::Shape("f32[1,2]")));
}
TEST_P(SpmdPartitioningTest, BroadcastAsReplicate3) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[1,1] parameter(0),
sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %copy = f32[1,1] copy(%param0), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
auto param0 = AllOf(op::Parameter(0), op::Shape("f32[1,1]"));
EXPECT_THAT(root, AllOf(op::Copy(op::AllReduce(op::Select(_, param0, _))),
op::Shape("f32[1,1]")));
}
TEST_P(SpmdPartitioningTest, TupleWithSubgroupManual) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[6,3]{1,0}
constant({{1,3,7},{5,1,4},{1,2,8},{2,3,7},{5,2,4},{2,2,8}}),
sharding={replicated}
param = (f32[6,3]{1,0}, f32[]) parameter(0),
sharding={{devices=[2,1,2]<=[4] last_tile_dims={manual}},{replicated}}
gte = f32[6,3]{1,0} get-tuple-element(param), index=0,
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
ROOT tuple = (f32[6,3]{1,0}, f32[6,3]{1,0}) tuple(constant, gte),
sharding={{replicated},{devices=[2,1,2]<=[4] last_tile_dims={manual}}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Tuple(op::Constant(), op::GetTupleElement(op::Parameter(0))));
}
TEST_P(SpmdPartitioningTest, SubgroupManualSharedOperand) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
constant = f32[] constant(1), sharding={replicated}
broadcast = f32[2,2] broadcast(constant), dimensions={},
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
ROOT add = f32[2,2] add(broadcast, broadcast),
sharding={devices=[2,1,2]<=[4] last_tile_dims={manual}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(op::Broadcast(op::Constant()),
op::Broadcast(op::Constant())));
}
TEST_P(SpmdPartitioningTest, SubgroupManualAllReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{2,0},{1,3}}, use_global_device_ids=true, channel_id=1,
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::AllReduce(op::Parameter(0)), op::Shape("f32[1,2]")));
EXPECT_EQ(root->replica_groups().size(), 2);
}
TEST_P(SpmdPartitioningTest, SubgroupIllegalManualAllReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{1,0},{2,3}}, use_global_device_ids=true, channel_id=1,
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
}
)";
auto module_status = PartitionComputation(hlo_string, 4);
EXPECT_FALSE(module_status.status().ok());
EXPECT_THAT(module_status.status().ToString(),
::testing::HasSubstr("Manual all-reduce across devices that "
"belong to different manual subgroups"));
}
TEST_P(SpmdPartitioningTest, AllReduceNoSharding) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = f32[2,2] parameter(0), sharding={devices=[2,2]<=[4]}
ROOT all-reduce = f32[2,2]{1,0} all-reduce(param), to_apply=sum,
replica_groups={{0,1,2,3}}, use_global_device_ids=true, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::AllReduce(), op::Shape("f32[2,2]")));
EXPECT_EQ(root->replica_groups().size(), 1);
}
TEST_P(SpmdPartitioningTest, SubgroupManualReduce) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
constant = f32[] constant(0),
sharding={devices=[2,2]<=[4] last_tile_dims={manual,replicated}}
param = f32[2,2] parameter(0),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dims={manual}}
ROOT reduce = f32[2] reduce(param, constant), dimensions={0}, to_apply=sum,
sharding={devices=[1,2,2]<=[4] last_tile_dims={manual,replicated}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::AllReduce(op::Reduce(op::Parameter(0), op::Constant())));
EXPECT_EQ(root->replica_groups().size(), 2);
}
TEST_P(SpmdPartitioningTest, ScatterPreferUpdateIndexIfSmaller) {
absl::string_view hlo_string = R"(
HloModule module
%scatter_add_reducer__33.191857 (parameter.191858: bf16[], parameter.191859: bf16[]) -> bf16[] {
%parameter.191858 = bf16[] parameter(0)
%parameter.191859 = bf16[] parameter(1)
ROOT %add.4425 = bf16[] add(bf16[] %parameter.191858, bf16[] %parameter.191859)
}
ENTRY entry {
p1 = s32[2048,1024,1]{2,1,0} parameter(0)
p2 = bf16[2048,1024,2040]{2,1,0} parameter(1)
%constant.8635 = bf16[] constant(0)
%broadcast.21781 = bf16[50048,2040]{1,0} broadcast(bf16[] %constant.8635), dimensions={},
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
%select.1954 = s32[2048,1024,1]{2,1,0} copy(%p1), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%slice.1274 = bf16[2048,1024,2040]{2,1,0} copy(%p2),
sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
%scatter.34 = bf16[50048,2040]{1,0} scatter(bf16[50048,2040]{1,0} %broadcast.21781,
s32[2048,1024,1]{2,1,0} %select.1954, bf16[2048,1024,2040]{2,1,0} %slice.1274),
update_window_dims={2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
index_vector_dim=2, to_apply=%scatter_add_reducer__33.191857,
sharding={devices=[1,2,4]<=[8] last_tile_dim_replicate}
ROOT c = bf16[50048,2040]{1,0} copy(scatter.34),
sharding={replicated}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, op::Copy(op::AllReduce(op::DynamicUpdateSlice(
_,
op::CollectivePermute(op::AllReduce(op::Scatter(
op::Shape("bf16[50048,1020]"), op::Shape("s32[512,1024,1]"),
op::Shape("bf16[512,1024,1020]")))),
_, _))));
}
TEST_P(SpmdPartitioningTest, ScatterPreferTrivialIfSmallerThanIndices) {
absl::string_view hlo_string = R"(
HloModule module
%scatter_add_reducer__33.191857 (parameter.191858: bf16[], parameter.191859: bf16[]) -> bf16[] {
%parameter.191858 = bf16[] parameter(0)
%parameter.191859 = bf16[] parameter(1)
ROOT %add.4425 = bf16[] add(bf16[] %parameter.191858, bf16[] %parameter.191859)
}
ENTRY entry {
p1 = s32[32,512,3]{2,1,0} parameter(0)
p2 = bf16[32,512]{1,0} parameter(1)
%constant.8635 = bf16[] constant(0)
%broadcast.21781 = bf16[32,512,50001]{2,1,0} broadcast(bf16[] %constant.8635), dimensions={},
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%select.1954 = s32[32,512,3]{2,1,0} copy(%p1), sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
%slice.1274 = bf16[32,512]{1,0} copy(%p2),
sharding={devices=[1,4,2]<=[8] last_tile_dim_replicate}
%scatter.34 = bf16[32,512,50001]{2,1,0} scatter(bf16[32,512,50001]{2,1,0} %broadcast.21781,
s32[32,512,3]{2,1,0} %select.1954, bf16[32,512]{1,0} %slice.1274),
update_window_dims={}, inserted_window_dims={0,1,2}, scatter_dims_to_operand_dims={0,1,2},
index_vector_dim=2, to_apply=%scatter_add_reducer__33.191857,
sharding={devices=[1,4,1,2]<=[8] last_tile_dim_replicate}
ROOT c = bf16[32,512,50001]{2,1,0} copy(scatter.34),
sharding={replicated}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::Copy(op::AllReduce(op::DynamicUpdateSlice(
_,
op::AllReduce(op::Scatter(op::Shape("bf16[32,128,50001]"),
op::Shape("s32[32,256,3]"),
op::Shape("bf16[32,256]"))),
_, _, _))));
}
TEST_P(SpmdPartitioningTest, GatherOperandPassthroughIndexPassthrough) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0), sharding={replicated}
%indices = s32[7] parameter(1), sharding={replicated}
%input.copy = f32[2,9] copy(%input), sharding={devices=[1,2,2]1,0,3,2 last_tile_dim_replicate}
%indices.copy = s32[7] copy(%indices), sharding={devices=[2,2]1,2,3,0 last_tile_dim_replicate}
%gather = f32[7,9] gather(%input.copy, %indices.copy), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}, sharding={devices=[2,2]<=[4]}
ROOT %copy = f32[7,9] copy(%gather), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const HloInstruction* gather = FindInstruction(module.get(), "gather.1");
EXPECT_NE(gather, nullptr);
EXPECT_THAT(gather,
AllOf(op::Shape("f32[4,5]"),
op::Gather(op::Shape("f32[2,5]"), op::Shape("s32[4]"))));
}
TEST_P(SpmdPartitioningTest, GatherIndexPassthroughTrivialSlice) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[17,9] parameter(0)
%indices = s32[2,3] parameter(1)
%input.copy = f32[17,9] copy(%input), sharding={devices=[2,1,2]3,2,1,0 last_tile_dim_replicate}
%indices.copy = s32[2,3] copy(%indices), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%gather = f32[2,3,9] gather(%input.copy, %indices.copy), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2,
slice_sizes={1,9}, sharding={devices=[2,1,1,2]1,0,3,2 last_tile_dim_replicate}
ROOT %copy = f32[2,3,9] copy(%gather), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
const HloInstruction* gather = FindInstruction(module.get(), "gather.1");
EXPECT_NE(gather, nullptr);
EXPECT_THAT(gather,
AllOf(op::Shape("f32[1,3,9]"),
op::Gather(op::Shape("f32[9,9]"), op::Shape("s32[1,3]"))));
}
TEST_P(SpmdPartitioningTest, GatherReplicatedCorrectOutput) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[64,2,250112] parameter(0), sharding={devices=[16,1,2]<=[32]}
%indices = s32[10,1] parameter(1), sharding={replicated}
%input.copy = f32[64,2,250112] copy(%input), sharding={
devices=[16,1,2]<=[32]}
%indices.copy = s32[10,1] copy(%indices), sharding={replicated}
%gather = f32[64,2,10] gather(f32[64,2,250112] %input,
s32[10,1]{1,0} %indices.copy), offset_dims={0,1}, collapsed_slice_dims={2},
start_index_map={2}, index_vector_dim=1, slice_sizes={64,2,1},
sharding={devices=[16,1,1,2]<=[32] last_tile_dim_replicate}
ROOT %copy = (f32[64,2,10]) tuple(gather),
sharding={{devices=[16,1,1,2]<=[32] last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 32));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Shape("(f32[4,2,10])"));
}
TEST_P(SpmdPartitioningTest, GatherTrivialRestoreSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[250112,4096] parameter(0), sharding={replicated}
%cpy.input = bf16[250112,4096] copy(%input), sharding={devices=[32,1]<=[32]}
%indices = s32[64,1,1] parameter(1), sharding={replicated}
%cpy.indices = s32[64,1,1] copy(%indices), sharding={replicated}
%gather = bf16[64,1,4096] gather(bf16[250112,4096] %cpy.input, s32[64,1,1] %cpy.indices),
offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,4096}, sharding={replicated}
ROOT %copy = bf16[64,1,4096] copy(gather), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 32));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Shape("bf16[64,1,4096]"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::AllReduce(op::Select(
_, _, op::Gather(op::Shape("bf16[7816,4096]"), _)))));
}
TEST_P(SpmdPartitioningTest, SliceTo1) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0), sharding={devices=[4]<=[4]}
ROOT slice.134 = f32[1] slice(input), slice={[0:1]},
sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Slice(op::Parameter()), op::Shape("f32[1]")));
}
TEST_P(SpmdPartitioningTest, SliceTo1_8Shards) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[4,4] parameter(0), sharding={devices=[4,2]<=[8]}
ROOT %slice = f32[1,4] slice(%input), slice={[0:1], [0:4]},
sharding={devices=[4,2]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Copy(op::Parameter()), op::Shape("f32[1,2]")));
}
TEST_P(SpmdPartitioningTest, SliceTo1PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[16] parameter(0),
sharding={devices=[2,2]<=[4] last_tile_dim_replicate}
ROOT slice.134 = f32[1] slice(input), slice={[0:1]},
sharding={devices=[2,2]<=[4] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Slice(op::Parameter()), op::Shape("f32[1]")));
}
TEST_P(SpmdPartitioningTest, SliceTo2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0), sharding={devices=[4]<=[4]}
ROOT slice.134 = f32[2] slice(input), slice={[0:2]},
sharding={devices=[4]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
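// The two-element slice needs one element from the neighboring shard, so the
// expected pattern concatenates a collective-permuted halo with the locally
// sliced element and dynamic-slices out this partition's element.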
auto slice1 = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
auto halo =
op::CollectivePermute(AllOf(op::Slice(slice1), op::Shape("f32[1]")));
auto slice_self = AllOf(op::Slice(slice1), op::Shape("f32[1]"));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(AllOf(op::DynamicSlice(op::Concatenate(halo, slice_self), _),
op::Shape("f32[1]"))));
}
TEST_P(SpmdPartitioningTest, SliceToMiddle2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0), sharding={devices=[8]<=[8]}
ROOT %slice = f32[2] slice(input), slice={[300:302]},
sharding={devices=[8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
auto slice = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
auto halo_slice = AllOf(op::Slice(slice), op::Shape("f32[1]"));
auto halo = AllOf(op::CollectivePermute(halo_slice), op::Shape("f32[1]"));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Select(_, halo, halo)));
}
TEST_P(SpmdPartitioningTest, SliceToMiddle2PartiallyReplicated) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[512] parameter(0),
sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
ROOT %slice = f32[2] slice(input), slice={[300:302]},
sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
auto slice = AllOf(op::Slice(op::Parameter()), op::Shape("f32[2]"));
auto halo_slice = AllOf(op::Slice(slice), op::Shape("f32[1]"));
auto halo = AllOf(op::CollectivePermute(halo_slice), op::Shape("f32[1]"));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Select(_, halo, halo)));
}
TEST_P(SpmdPartitioningTest, SliceToHalfSize) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[32] parameter(0), sharding={devices=[16]<=[16]}
ROOT %slice = f32[16] slice(input), slice={[0:16]},
sharding={devices=[16]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
auto piece1 =
AllOf(op::Pad(op::CollectivePermute(op::Slice(op::Parameter())), _),
op::Shape("f32[2]"));
auto piece2 =
op::Select(_, op::CollectivePermute(op::Parameter()), op::Parameter());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::DynamicSlice(op::Select(_, piece1, piece2), _)));
}
TEST_P(SpmdPartitioningTest, PadToDoubleSize) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[16] parameter(0), sharding={devices=[16]<=[16]}
%pv = f32[] constant(-1)
ROOT %pad = f32[32] pad(input, pv), padding=0_16,
sharding={devices=[16]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
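// Padding from 16 to 32 elements doubles the per-shard size, so the expected
// result assembles each output shard from pieces obtained via
// collective-permutes of the parameter, falling back to the broadcast pad
// value where no real data exists.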
auto cp1 = op::CollectivePermute(op::Parameter(0));
auto cp2 = op::CollectivePermute(op::Parameter(0));
auto piece1 = op::Select(_, cp1, op::Parameter(0));
auto piece2 = op::Select(_, cp2, cp1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Select(_, op::Concatenate(piece1, piece2),
op::Broadcast(op::Constant())));
}
TEST_P(SpmdPartitioningTest, PadAllPadvalue) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[16] parameter(0), sharding={devices=[16]<=[16]}
%pv = f32[] constant(-1)
ROOT %pad = f32[16] pad(input, pv), padding=16_-16,
sharding={devices=[16]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Broadcast(op::Constant()), op::Shape("f32[1]")));
}
TEST_P(SpmdPartitioningTest, PadFrom1To24) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[1] parameter(0), sharding={devices=[8]<=[8]}
%pv = f32[] constant(-1)
ROOT %pad = f32[24] pad(input, pv), padding=3_20,
sharding={devices=[8]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto cp = op::CollectivePermute(op::Parameter(0));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Shape("f32[3]"),
op::Select(_, op::Concatenate(cp, op::Broadcast(op::Constant())),
op::Broadcast(op::Constant()))));
}
TEST_P(SpmdPartitioningTest, SliceToLessThanHalf) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[100,2] parameter(0), sharding={devices=[2,1]0,1}
ROOT slice.20 = f32[6,2] slice(input), slice={[0:6], [0:2]}, sharding={devices=[2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto cp = op::CollectivePermute(op::Slice(op::Parameter(0)));
auto self = op::Slice(op::Parameter(0));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Select(_, cp, self)));
}
TEST_P(SpmdPartitioningTest, PartialDusReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[3,2] parameter(0), sharding={devices=[8,2]<=[16]}
ROOT %copy = f32[3,2] copy(input), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
VLOG(1) << module->ToString();
auto dus =
AllOf(op::Shape("f32[3,2]"),
op::DynamicUpdateSlice(op::Broadcast(),
op::Select(_, op::Parameter(0), _), _, _));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(AllOf(op::AllReduce(op::AllReduce(dus)))));
}
TEST_P(SpmdPartitioningTest, GatherPassthrough) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
p = f32[16,64,768,768]{3,2,1,0} parameter(0), sharding={replicated}
c = f32[16,64,768,768]{3,2,1,0} copy(p), sharding={devices=[1,4,1,1]<=[4]}
constant.1669 = s32[] constant(0)
iota.1012 = s32[6]{0} iota(), iota_dimension=0, sharding={replicated}
constant.1748 = s32[] constant(128), sharding={replicated}
broadcast.2642 = s32[6]{0} broadcast(constant.1748), dimensions={}, sharding={replicated}
multiply.92 = s32[6]{0} multiply(iota.1012, broadcast.2642), sharding={replicated}
broadcast.2643 = s32[2,6]{1,0} broadcast(multiply.92), dimensions={1}, sharding={replicated}
transpose.542 = s32[6,2]{0,1} transpose(broadcast.2643), dimensions={1,0}, sharding={replicated}
pad.19 = s32[6,4]{1,0} pad(transpose.542, constant.1669), padding=0_0x2_0, sharding={replicated}
ROOT gather.1 = f32[16,64,6,128,128]{4,3,2,1,0} gather(c, pad.19), offset_dims={0,1,3,4}, collapsed_slice_dims={}, start_index_map={0,1,2,3}, index_vector_dim=1, slice_sizes={16,64,128,128}, sharding={devices=[1,4,1,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Gather(), op::Shape("f32[16,16,6,128,128]")));
}
TEST_P(SpmdPartitioningTest, ComplexReshardFromPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[4,15,4,16] parameter(0)
%p.copy = f32[4,15,4,16] copy(p),
sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
%a = f32[4,15,4,16] add(p.copy, p.copy),
sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,8,1,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Reshape(op::Reshape(op::Transpose(op::AllToAll(_))))));
}
TEST_P(SpmdPartitioningTest, ComplexReshardToPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[4,15,4,16] parameter(0)
%p.copy = f32[4,15,4,16] copy(p),
sharding={devices=[1,4,2,1]<=[8]}
%a = f32[4,15,4,16] add(p.copy, p.copy),
sharding={devices=[1,4,2,1]<=[8]}
ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,1,1,2,4]<=[4,2]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Reshape(op::Transpose(op::AllToAll(_)))));
}
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionRight) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[4,15,4,15] parameter(0)
%p.copy = f32[4,15,4,15] copy(p),
sharding={devices=[1,4,1,2]<=[8]}
%a = f32[4,15,4,15] add(p.copy, p.copy),
sharding={devices=[1,4,1,2]<=[8]}
ROOT %c2 = f32[4,15,4,15] copy(a), sharding={devices=[1,1,1,8]<=[4,2]T(1,0)}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Reshape(
op::Slice(op::Reshape(op::Transpose(op::AllToAll(_)))))));
}
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionLeft) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[2,15,1,2] parameter(0)
%p.copy = f32[2,15,1,2] copy(p),
sharding={devices=[1,4,1,2]<=[8]}
%a = f32[2,15,1,2] add(p.copy, p.copy),
sharding={devices=[1,4,1,2]<=[8]}
ROOT %c2 = f32[2,15,1,2] copy(a), sharding={devices=[1,8,1,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Reshape(op::Reshape(op::Transpose(op::AllToAll(_))))));
}
TEST_P(SpmdPartitioningTest, ComplexReshardMoveMergeDimensionLeftReorder) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[4,15,4,16] parameter(0)
%p.copy = f32[4,15,4,16] copy(p),
sharding={devices=[1,4,1,2]<=[8]}
%a = f32[4,15,4,16] add(p.copy, p.copy),
sharding={devices=[1,4,1,2]<=[8]}
ROOT %c2 = f32[4,15,4,16] copy(a), sharding={devices=[1,8,1,1]<=[4,2]T(1,0)}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::Reshape(op::CollectivePermute(
op::Reshape(op::Transpose(op::AllToAll(_)))))));
}
TEST_P(SpmdPartitioningTest, PaddedConvReshard) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = bf16[16,256,256,384]{3,2,1,0} parameter(0)
%p2 = bf16[3,3,384,384]{3,2,1,0} parameter(1)
%p.copy = bf16[16,256,256,384]{3,2,1,0} copy(%p), sharding={devices=[2,1,4,1]<=[8]}
%p2.copy = bf16[3,3,384,384]{3,2,1,0} copy(%p2), sharding={replicated}
ROOT %convolution.10115 = bf16[16,256,256,384]{3,2,1,0} convolution(%p.copy, %p2.copy), window={size=3x3 pad=128_128x128_128 rhs_dilate=128x128}, dim_labels=b01f_01io->b01f, sharding={devices=[2,1,4,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Convolution(
op::DynamicSlice(op::Pad(_, op::Constant()), _, _, _, _), _));
}
TEST_P(SpmdPartitioningTest, KeepPartitionedNonSlicedDimension) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = bf16[16,128,128,384]{3,2,1,0} parameter(0), sharding={replicated}
%constant.1165 = s32[] constant(0), sharding={replicated}
constant.1151 = s32[] constant(192), sharding={replicated}
broadcast.1152 = s32[2]{0} broadcast(constant.1151), dimensions={}, sharding={replicated}
slice.1576 = s32[1]{0} slice(broadcast.1152), slice={[0:1]}, sharding={replicated}
reshape.1888 = s32[] reshape(slice.1576), sharding={replicated}
slice.1546 = s32[1]{0} slice(broadcast.1152), slice={[1:2]}, sharding={replicated}
reshape.1890 = s32[] reshape(slice.1546), sharding={replicated}
constant.861 = bf16[] constant(0), sharding={replicated}
broadcast.862 = bf16[16,512,512,384]{3,2,1,0} broadcast(constant.861), dimensions={}, sharding={devices=[2,2,1,1]<=[4]}
%c = bf16[16,128,128,384]{3,2,1,0} copy(p), sharding={devices=[2,2,1,1]<=[4]}
add.228 = bf16[16,128,128,384]{3,2,1,0} add(c, c), sharding={devices=[2,2,1,1]<=[4]}
ROOT dynamic-update-slice.111 = bf16[16,512,512,384]{3,2,1,0} dynamic-update-slice(broadcast.862, add.228, constant.1165, reshape.1888, reshape.1890, constant.1165), sharding={devices=[2,2,1,1]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(AllOf(op::DynamicUpdateSlice(),
op::Shape("bf16[8,512,512,384]")),
_, _, _, _));
}
TEST_P(SpmdPartitioningTest,
KeepPartitionedNonSlicedDimensionWithConstantIndices) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
p1 = bf16[16,192,192,384]{3,2,1,0} parameter(0), sharding={replicated}
p2 = bf16[16,128,128,384]{3,2,1,0} parameter(1), sharding={replicated}
c1 = bf16[16,192,192,384]{3,2,1,0} copy(p1), sharding={devices=[2,2,2,1]<=[8]}
c2 = bf16[16,128,128,384]{3,2,1,0} copy(p2), sharding={devices=[2,2,2,1]<=[8]}
constant.1163 = bf16[] constant(0), sharding={replicated}
constant.1165 = s32[] constant(0), sharding={replicated}
pad.179 = bf16[16,224,224,384]{3,2,1,0} pad(c1, constant.1163), padding=0_0x16_16x16_16x0_0, sharding={devices=[2,2,2,1]<=[8]}
add.439 = bf16[16,128,128,384]{3,2,1,0} add(c2, c2), sharding={devices=[2,2,2,1]<=[8]}
constant.1070 = s32[] constant(48), sharding={replicated}
dynamic-update-slice.128 = bf16[16,224,224,384]{3,2,1,0} dynamic-update-slice(pad.179, add.439, constant.1165, constant.1070, constant.1070, constant.1165), sharding={devices=[2,2,2,1]<=[8]}
ROOT c = bf16[16,224,224,384]{3,2,1,0} copy(dynamic-update-slice.128), sharding={devices=[2,2,2,1]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::DynamicSlice(
AllOf(op::DynamicUpdateSlice(), op::Shape("bf16[8,224,224,384]")), _,
_, _, _)));
}
TEST_P(SpmdPartitioningTest, CustomCallManualSharding) {
const char* const hlo_string = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 (Arg_0.1: f32[4,4,8], Arg_1.2: f32[4,8]) -> (f32[4,4,8], f32[4]) {
%Arg_0.1 = f32[4,4,8]{2,1,0} parameter(0), sharding={devices=[4,1,1]<=[4]}
%copy.3 = f32[4,4,8]{2,1,0} copy(f32[4,4,8]{2,1,0} %Arg_0.1), sharding={devices=[4,1,1]<=[4]}
%custom-call.4 = f32[1,4,8]{2,1,0} custom-call(f32[4,4,8]{2,1,0} %copy.3), custom_call_target="SPMDFullToShardShape", sharding={manual}
%reshape.7 = f32[4,8]{1,0} reshape(f32[1,4,8]{2,1,0} %custom-call.4), sharding={manual}
%Arg_1.2 = f32[4,8]{1,0} parameter(1), sharding={replicated}
%copy.2 = f32[4,8]{1,0} copy(f32[4,8]{1,0} %Arg_1.2), sharding={replicated}
%custom-call.6 = f32[4,8]{1,0} custom-call(f32[4,8]{1,0} %copy.2), custom_call_target="SPMDFullToShardShape", sharding={manual}
%custom-call.8 = (f32[4,8]{1,0}, f32[1]{0}) custom-call(f32[4,8]{1,0} %reshape.7, f32[4,8]{1,0} %custom-call.6), custom_call_target="dummy", operand_layout_constraints={f32[4,8]{1,0}, f32[4,8]{1,0}}, api_version=API_VERSION_STATUS_RETURNING, sharding={{manual}, {manual}}
%get-tuple-element.9 = f32[4,8]{1,0} get-tuple-element((f32[4,8]{1,0}, f32[1]{0}) %custom-call.8), index=0, sharding={manual}
%reshape.11 = f32[1,4,8]{2,1,0} reshape(f32[4,8]{1,0} %get-tuple-element.9), sharding={manual}
%copy.1 = f32[1,4,8]{2,1,0} copy(f32[1,4,8]{2,1,0} %reshape.11), sharding={manual}
%custom-call.14 = f32[4,4,8]{2,1,0} custom-call(f32[1,4,8]{2,1,0} %copy.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,1]<=[4]}
%reshape.18 = f32[4,4,8]{2,1,0} reshape(f32[4,4,8]{2,1,0} %custom-call.14), sharding={devices=[4,1,1]<=[4]}
%get-tuple-element.10 = f32[1]{0} get-tuple-element((f32[4,8]{1,0}, f32[1]{0}) %custom-call.8), index=1, sharding={manual}
%reshape.12 = f32[1,1]{1,0} reshape(f32[1]{0} %get-tuple-element.10), sharding={manual}
%copy = f32[1,1]{1,0} copy(f32[1,1]{1,0} %reshape.12), sharding={manual}
%custom-call.16 = f32[4,1]{1,0} custom-call(f32[1,1]{1,0} %copy), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1]<=[4]}
%reshape.17 = f32[4]{0} reshape(f32[4,1]{1,0} %custom-call.16), sharding={devices=[4]<=[4]}
%reshape.19 = f32[4]{0} reshape(f32[4]{0} %reshape.17), sharding={devices=[4]<=[4]}
ROOT %tuple.20 = (f32[4,4,8]{2,1,0}, f32[4]{0}) tuple(f32[4,4,8]{2,1,0} %reshape.18, f32[4]{0} %reshape.19), sharding={{replicated}, {replicated}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::DynamicUpdateSlice(
_, op::Shape("f32[1,4,8]"), _, _, _)),
op::AllReduce(op::DynamicUpdateSlice(
_, op::Shape("f32[1]"), _))));
}
TEST_P(SpmdPartitioningTest, UnevenPadAllToAllReshard) {
const char* const hlo_string = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 {
%Arg_0.1 = f32[19,19]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
%add.3171 = f32[19,19]{1,0} add(%Arg_0.1, %Arg_0.1), sharding={devices=[4,2]<=[8]}
%transpose.3172 = f32[19,19]{0,1} transpose(%add.3171), dimensions={1,0}, sharding={devices=[2,4]<=[4,2]T(1,0)}
ROOT %add.3173 = f32[19,19]{1,0} add(%add.3171, %transpose.3172), sharding={devices=[4,2]<=[8]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
int64_t collective_permute_count = 0;
for (auto* i : module->entry_computation()->instructions()) {
if (i->opcode() == HloOpcode::kCollectivePermute) {
++collective_permute_count;
}
}
EXPECT_EQ(collective_permute_count, 1);
}
TEST_P(SpmdPartitioningTest, UnevenPadAllToAllReshard2) {
const char* const hlo_string = R"(
HloModule pjit_xmap_dummy.5
ENTRY %main.21 {
%Arg_0.1 = f32[5,5]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
add.3171 = f32[5,5]{1,0} add(Arg_0.1, Arg_0.1), sharding={devices=[4,2]<=[8]}
transpose.3172 = f32[5,5]{0,1} transpose(add.3171), dimensions={1,0}, sharding={devices=[2,4]<=[4,2]T(1,0)}
ROOT add.3173 = f32[5,5]{1,0} add(add.3171, transpose.3172), sharding={devices=[4,2]<=[8]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
int64_t collective_permute_count = 0;
for (auto* i : module->entry_computation()->instructions()) {
if (i->opcode() == HloOpcode::kCollectivePermute) {
++collective_permute_count;
}
}
EXPECT_EQ(collective_permute_count, 3);
}
TEST_P(SpmdPartitioningTest, CustomCallShardingRegistration) {
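// A custom-call partitioner that keeps the leading (batch) dimensions
// partitioned, partially replicates the trailing two dimensions before
// emitting the partitioned custom call, and then reshards the result back to
// the instruction's original sharding.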
class BatchableCustomCallPartitioner : public CustomCallPartitioner {
public:
HloSharding PropagateUserSharding(
const HloInstruction* instruction, const HloInstruction* user,
const HloSharding& sharding) const override {
return sharding;
}
std::optional<HloSharding> InferShardingFromOperands(
const HloInstruction* instruction) const override {
if (instruction->operand(0)->has_sharding()) {
return instruction->operand(0)->sharding();
}
return std::nullopt;
}
bool IsCustomCallShardable(
const HloInstruction* instruction) const override {
return true;
}
absl::Status Partition(spmd::SpmdPartitioningVisitor* partitioner,
HloInstruction* hlo) const override {
if (hlo->shape().rank() <= 2) {
return partitioner->DefaultAction(hlo);
}
const int first_non_batch_dim = hlo->shape().rank() - 2;
HloInstruction* operand = hlo->mutable_operand(0);
HloSharding target_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
hlo->sharding(), {first_non_batch_dim, first_non_batch_dim + 1});
spmd::PartitionedHlo operand_partitioned =
partitioner->GetPartitionedHlo(operand).Reshard(target_sharding);
HloCustomCallInstruction* custom_call =
Cast<HloCustomCallInstruction>(hlo);
Shape partitioned_shape_with_layout_constraint =
operand_partitioned.hlo()->shape();
(*partitioned_shape_with_layout_constraint.mutable_layout()) =
custom_call->operand_shapes_with_layout()[0].layout();
HloInstruction* partitioned_hlo = partitioner->builder()->AddInstruction(
HloInstruction::CreateCustomCall(
operand_partitioned.hlo()->shape(), {operand_partitioned.hlo()},
"BatchableCustomCall",
{partitioned_shape_with_layout_constraint}));
partitioned_hlo->set_sharding(target_sharding);
spmd::PartitionedHlo result_partitioned =
spmd::PartitionedHlo(partitioned_hlo,
operand_partitioned.base_shape(),
operand_partitioned.state())
.Reshard(hlo->sharding());
partitioner->SetPartitionedHlo(hlo, result_partitioned);
return absl::OkStatus();
}
};
RegisterCustomCallPartitioner(
"BatchableCustomCall",
std::make_unique<BatchableCustomCallPartitioner>());
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p = f32[102,128,128]{2,1,0:T(8,128)} parameter(0), sharding={devices=[2,1,2]<=[4]}
ROOT custom-call = f32[102,128,128]{2,1,0:T(8,128)} custom-call(p), custom_call_target="BatchableCustomCall", operand_layout_constraints={f32[102,128,128]{2,1,0}}, sharding={devices=[2,1,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 4));
VLOG(1) << module->ToString();
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicSlice(
AllOf(op::CustomCall(_), op::Shape("f32[51,128,128]")),
_, _, _));
}
TEST_P(SpmdPartitioningTest, ManualGetTupleElement) {
const char* const hlo_string = R"(
HloModule pjit
orclone {
lhs.1 = u32[] parameter(0)
rhs.1 = u32[] parameter(2)
or.2 = u32[] or(lhs.1, rhs.1)
lhs.0 = u32[] parameter(1)
rhs.0 = u32[] parameter(3)
or.3 = u32[] or(lhs.0, rhs.0)
ROOT tuple.4 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %main.21 {
select.104 = u32[2,2]{1,0} parameter(0), sharding={manual}
shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual}
constant.4183 = u32[] constant(0), sharding={manual}
reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, sharding={{manual},{manual}}, to_apply=orclone
ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0, sharding={manual}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Reduce(_, _, _, _)));
}
TEST_P(SpmdPartitioningTest, CombiningScatterPartitioning) {
const char* const hlo_string = R"(
HloModule pjit
region_110.8267 {
Arg_0.8268 = bf16[] parameter(0)
Arg_1.8269 = bf16[] parameter(1)
ROOT add.8270 = bf16[] add(Arg_0.8268, Arg_1.8269)
}
ENTRY %main.21 {
broadcast.8659 = bf16[2,8,12288,192,64]{4,3,2,1,0} parameter(0), sharding={devices=[2,1,2,4,1]<=[16]}
reshape.9796 = bf16[2,1,12288,192,64]{4,3,2,1,0} parameter(1), sharding={devices=[2,1,2,4,1]<=[16]}
iota.50 = s32[2,1]{1,0} iota(), iota_dimension=0, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
constant.1585 = s32[] constant(0), sharding={replicated}
broadcast.3764 = s32[2,1]{1,0} broadcast(constant.1585), dimensions={}, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
reshape_idx = s32[2,1]{1,0} parameter(2), sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
concatenate.8907 = s32[2,5]{1,0} concatenate(iota.50, reshape_idx, broadcast.3764, broadcast.3764, broadcast.3764), dimensions={1}, sharding={devices=[2,1,8]<=[16] last_tile_dim_replicate}
scatter.9797 = bf16[2,8,12288,192,64]{4,3,2,1,0} scatter(broadcast.8659, concatenate.8907, reshape.9796), update_window_dims={1,2,3,4}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region_110.8267, sharding={devices=[2,1,2,4,1]<=[16]}
ROOT c = bf16[2,8,12288,192,64]{4,3,2,1,0} copy(scatter.9797), sharding={devices=[2,1,2,4,1]<=[16]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(AllOf(op::Shape("bf16[1,8,6144,48,64]"), op::Scatter(_, _, _))));
EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kAllReduce), nullptr);
}
TEST_P(SpmdPartitioningTest, MatchOutputAlignmentNonContractingDot) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
multiply.3535 = f32[4,4]{1,0} parameter(0), sharding={devices=[2,4,2]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
reshape.4221 = f32[4,4]{1,0} parameter(1), sharding={devices=[4,1,4]0,8,4,12,1,9,5,13,2,10,6,14,3,11,7,15 last_tile_dim_replicate}
dot.11597 = f32[4,4]{1,0} dot(multiply.3535, reshape.4221), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1,8]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
ROOT copy.1 = f32[4,4]{1,0} copy(dot.11597), sharding={devices=[2,1,8]0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15 last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
nullptr);
}
TEST_P(SpmdPartitioningTest, ComplexReshardPartialMerging) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
multiply.3535 = f32[256,256,256]{2,1,0} parameter(0), sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
ROOT copy.1 = f32[256,256,256]{2,1,0} copy(multiply.3535), sharding={devices=[1,2,1,4]<=[8] last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_NE(FindInstruction(module.get(), HloOpcode::kAllToAll), nullptr);
}
TEST_P(SpmdPartitioningTest, PartialReshardingInfiniteLoops) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
multiply.3535 = f32[256,256,256]{2,1,0} parameter(0), sharding={devices=[4,1,1,2]<=[8] last_tile_dim_replicate}
ROOT copy.1 = f32[256,256,256]{2,1,0} copy(multiply.3535), sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
}
TEST_P(SpmdPartitioningTest, GatherCostModelForUnmatchedSharding) {
const char* const hlo_string = R"(
HloModule pjit
region_10.581.clone {
Arg_0.53 = bf16[] parameter(0)
Arg_1.53 = bf16[] parameter(1)
ROOT add.1294 = bf16[] add(Arg_0.53, Arg_1.53)
}
ENTRY %main.21 {
p0 = bf16[8192,128]{1,0} parameter(0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
p1 = s32[16384,1]{1,0} parameter(1), sharding={devices=[8,1,2]<=[16] last_tile_dim_replicate}
gather.0 = bf16[16384,128]{1,0} gather(p0, p1), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,128}, sharding={devices=[8,2]<=[16]}
constant.2467 = bf16[] constant(0)
reduce.1749 = bf16[16384]{0} reduce(gather.0, constant.2467), dimensions={1}, to_apply=region_10.581.clone, sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
ROOT copy.1 = bf16[16384]{0} copy(reduce.1749), sharding={devices=[8,2]<=[16] last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
XLA_VLOG_LINES(1, module->ToString());
auto* gather = FindInstruction(module.get(), HloOpcode::kGather);
EXPECT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Shape("bf16[2048,64]"));
}
TEST_P(SpmdPartitioningTest, ScatterCostModelForUnmatchedSharding) {
const char* const hlo_string = R"(
HloModule pjit
%region_335.4575 {
%Arg_0.4576 = bf16[] parameter(0)
%Arg_1.4577 = bf16[] parameter(1)
ROOT %add.4578 = bf16[] add(%Arg_0.4576, %Arg_1.4577)
}
ENTRY %main.21 {
%p0 = bf16[8192,128]{1,0} parameter(0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
%p1 = s32[32768,1]{1,0} parameter(1), sharding={devices=[8,1,2]<=[16] last_tile_dim_replicate}
%p2 = bf16[32768,128]{1,0} parameter(2), sharding={devices=[8,2]<=[16]}
%scatter.0 = bf16[8192,128]{1,0} scatter(%p0, %p1, %p2), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%region_335.4575, sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
ROOT %convert.427 = f32[8192,128]{1,0} convert(%scatter.0), sharding={devices=[2,4,2]<=[2,4,2]T(2,1,0) last_tile_dim_replicate}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
XLA_VLOG_LINES(1, module->ToString());
auto* scatter = FindInstruction(module.get(), HloOpcode::kScatter);
EXPECT_NE(scatter, nullptr);
auto* updates = scatter->operand(2);
EXPECT_THAT(updates, op::Shape("bf16[4096,64]"));
}
TEST_P(SpmdPartitioningTest, ComplexReshardUnmerge) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.4 {
Arg_0.1 = f32[8,8,8,8]{3,2,1,0} parameter(0), sharding={devices=[1,1,2,8]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
tuple.2 = (f32[8,8,8,8]{3,2,1,0}) tuple(Arg_0.1), sharding={{devices=[1,4,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}}
ROOT get-tuple-element.3 = f32[8,8,8,8]{3,2,1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[1,4,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 16));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_EQ(allreduce, nullptr);
auto* alltoall = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(alltoall, nullptr);
}
TEST_P(SpmdPartitioningTest, ComplexReshardUnmergeToRight) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.4 {
Arg_0.1 = f32[8,32]{1,0} parameter(0), sharding={devices=[8,1]<=[4,2]T(1,0)}
tuple.2 = (f32[8,32]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
ROOT get-tuple-element.3 = f32[8,32]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_EQ(allreduce, nullptr);
auto* alltoall = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(alltoall, nullptr);
}
TEST_P(SpmdPartitioningTest, ComplexReshardUnmergeToLeft) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.4 {
Arg_0.1 = f32[8,32]{1,0} parameter(0), sharding={devices=[1,8]<=[4,2]T(1,0)}
tuple.2 = (f32[8,32]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
ROOT get-tuple-element.3 = f32[8,32]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_EQ(allreduce, nullptr);
auto* alltoall = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(alltoall, nullptr);
}
TEST_P(SpmdPartitioningTest, NoComplexReshardUnmergeToLeft) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.4 {
Arg_0.1 = f32[8,33]{1,0} parameter(0), sharding={devices=[1,8]<=[4,2]T(1,0)}
tuple.2 = (f32[8,33]{1,0}) tuple(Arg_0.1), sharding={{devices=[2,4]<=[4,2]T(1,0)}}
ROOT get-tuple-element.3 = f32[8,33]{1,0} get-tuple-element(tuple.2), index=0, sharding={devices=[2,4]<=[4,2]T(1,0)}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_NE(allreduce, nullptr);
auto* alltoall = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_EQ(alltoall, nullptr);
}
TEST_P(SpmdPartitioningTest, ReshardCrash) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.6 {
Arg_0.1 = f32[8,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* alltoall = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(alltoall, nullptr);
}
TEST_P(SpmdPartitioningTest, ReshardNoFullRematCompatible) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.6 {
Arg_0.1 = f32[6,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]<=[8]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_NE(allreduce, nullptr);
EXPECT_EQ(allreduce->replica_groups().size(), 2);
EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
nullptr);
}
TEST_P(SpmdPartitioningTest, ReshardNoFullRematIncompatible) {
const char* const hlo_string = R"(
HloModule Test
ENTRY main.6 {
Arg_0.1 = f32[6,32,4] parameter(0), sharding={devices=[4,2,1]0,2,1,3,4,6,5,7}
ROOT copy = copy(Arg_0.1), sharding={devices=[2,2,2]0,1,3,4,2,6,5,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* allreduce = FindInstruction(module.get(), HloOpcode::kAllReduce);
EXPECT_NE(allreduce, nullptr);
EXPECT_EQ(allreduce->replica_groups().size(), 2);
EXPECT_NE(FindInstruction(module.get(), HloOpcode::kCollectivePermute),
nullptr);
}
TEST_P(SpmdPartitioningTest, OutfeedChainedManualPartitioned) {
const char* const hlo_string = R"(
HloModule Test
ENTRY %entry (p0: f32[8], p1: f32[1]) -> (f32[1], token[]) {
%p1 = f32[1]{0} parameter(1), sharding={replicated}
%p0 = f32[8]{0} parameter(0), sharding={manual}
%tuple.1 = (f32[8]{0}) tuple(f32[8]{0} %p0), sharding={{manual}}
%constant.8 = u32[2]{0} constant({3, 12})
%tuple.10 = (u32[2]{0}) tuple(u32[2]{0} %constant.8), sharding={{manual}}
%aa.1 = token[] after-all()
%outfeed.1 = token[] outfeed((u32[2]{0}) %tuple.10, token[] %aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}}
%outfeed.2 = token[] outfeed((f32[8]{0}) %tuple.1, token[] %outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}}
ROOT %tuple.15 = (f32[1]{0}, token[]) tuple(f32[1]{0} %p1, token[] %outfeed.2), sharding={{replicated}, {manual}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
XLA_VLOG_LINES(1, module->ToString());
auto* outfeed = FindInstruction(module.get(), HloOpcode::kOutfeed);
EXPECT_NE(outfeed, nullptr);
EXPECT_THAT(outfeed->operand(0), op::Shape("(u32[2]{0})"));
}
TEST_P(SpmdPartitioningTest, PadUneven) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
%param0 = f32[128,13,257] parameter(0), sharding={devices=[1,2,1]0,1}
%const = f32[] constant(0)
ROOT %pad = f32[128,14,257] pad(%param0, %const), padding=0_0x0_1x0_0,
sharding={devices=[1,2,1]0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Select(), op::Shape("f32[128,7,257]")));
}
TEST_P(SpmdPartitioningTest, MatchOutputPartitioningForContractingRHS) {
absl::string_view hlo_string = R"(
HloModule extracted_module
ENTRY %extracted_computation {
%param = bf16[256,1,114688]{2,1,0} parameter(0)
%reshape.788 = bf16[256,114688]{1,0} reshape(bf16[256,1,114688]{2,1,0} %param), sharding={devices=[1,4,2]<=[2,4]T(1,0) last_tile_dim_replicate}
%param.1 = bf16[1,114688,14336]{2,1,0} parameter(1)
%reshape.747 = bf16[114688,14336]{1,0} reshape(bf16[1,114688,14336]{2,1,0} %param.1), sharding={devices=[4,2]<=[2,4]T(1,0)}
%dot.89 = bf16[256,14336]{1,0} dot(bf16[256,114688]{1,0} %reshape.788, bf16[114688,14336]{1,0} %reshape.747), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,8]<=[8]}
%reshape.789 = bf16[256,1,14336]{2,1,0} reshape(bf16[256,14336]{1,0} %dot.89), sharding={devices=[1,1,8]<=[8]}
ROOT %copy = bf16[256,1,14336]{2,1,0} copy(bf16[256,1,14336]{2,1,0} %reshape.789)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto* dot = FindInstruction(module.get(), HloOpcode::kDot);
EXPECT_NE(dot, nullptr);
EXPECT_NE(dot->operand(1)->opcode(), HloOpcode::kAllReduce);
}
TEST_P(SpmdPartitioningTest, MatchOutputPartitioningForContractingLHS) {
absl::string_view hlo_string = R"(
HloModule extracted_module
ENTRY %extracted_computation {
%param = bf16[256,1,114688]{2,1,0} parameter(0)
%reshape.788 = bf16[256,114688]{1,0} reshape(bf16[256,1,114688]{2,1,0} %param), sharding={devices=[2,4]<=[8]}
%param.1 = bf16[1,114688,14336]{2,1,0} parameter(1)
%reshape.747 = bf16[114688,14336]{1,0} reshape(bf16[1,114688,14336]{2,1,0} %param.1), sharding={devices=[4,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}
%dot.89 = bf16[256,14336]{1,0} dot(bf16[256,114688]{1,0} %reshape.788, bf16[114688,14336]{1,0} %reshape.747), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[8,1]<=[8]}
%reshape.789 = bf16[256,1,14336]{2,1,0} reshape(bf16[256,14336]{1,0} %dot.89), sharding={devices=[8,1,1]<=[8]}
ROOT %copy = bf16[256,1,14336]{2,1,0} copy(bf16[256,1,14336]{2,1,0} %reshape.789)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 8));
VLOG(1) << module->ToString();
auto* dot = FindInstruction(module.get(), HloOpcode::kDot);
EXPECT_NE(dot, nullptr);
EXPECT_NE(dot->operand(0)->opcode(), HloOpcode::kAllReduce);
}
TEST_P(SpmdPartitioningTest, TopKCustomCallTopKDimSharded) {
absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[1,2]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{devices=[1,2]0,1}, {devices=[1,2]0,1}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
EXPECT_THAT(sort_instruction,
op::Shape("(bf16[64,80]{1,0}, s32[64,80]{1,0})"));
auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
auto topk_operand = topk_instruction->operand(0);
EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
EXPECT_THAT(topk_instruction,
op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
EXPECT_THAT(topk_operand, op::Shape("bf16[64,128000]{1,0}"));
}
TEST_P(SpmdPartitioningTest, TopKCustomCallNonTopKDimSharded) {
absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[2,1]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{devices=[1,2]0,1}, {devices=[2,1]0,1}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
CHECK_NE(sort_instruction, nullptr);
auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
auto topk_operand = topk_instruction->operand(0);
EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
EXPECT_THAT(topk_instruction,
op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
EXPECT_THAT(topk_operand, op::Shape("bf16[32,256000]{1,0}"));
}
TEST_P(SpmdPartitioningTest,
TopKCustomCallTopkReplicatedOperandNonTopKDimSharded) {
absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[2,1]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{replicated}, {replicated}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
EXPECT_THAT(sort_instruction,
op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
auto topk_operand = topk_instruction->operand(0);
EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
EXPECT_THAT(topk_instruction,
op::Shape("(bf16[32,40]{1,0}, s32[32,40]{1,0})"));
EXPECT_THAT(topk_operand, op::Shape("bf16[32,256000]{1,0}"));
}
TEST_P(SpmdPartitioningTest,
TopKCustomCallTopkReplicatedOperandTopKDimSharded) {
absl::string_view hlo_string = R"(
HloModule module
region_695.22546 {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%multiply.43401 = bf16[64,256000]{1,0} parameter(0), sharding={devices=[1,2]0,1}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %multiply.43401), custom_call_target="TopK", called_computations={%region_695.22546}, sharding={{replicated}, {replicated}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
auto sort_instruction = FindInstruction(module.get(), HloOpcode::kSort);
EXPECT_THAT(sort_instruction,
op::Shape("(bf16[64,80]{1,0}, s32[64,80]{1,0})"));
auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
auto topk_operand = topk_instruction->operand(0);
EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
EXPECT_THAT(topk_instruction,
op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
EXPECT_THAT(topk_operand, op::Shape("bf16[64,128000]{1,0}"));
}
TEST_P(SpmdPartitioningTest, TopKCustomCallManualSharding) {
absl::string_view hlo_string = R"(
HloModule module
region {
Arg_2.22549 = s32[] parameter(2)
Arg_3.22550 = s32[] parameter(3)
Arg_0.22547 = bf16[] parameter(0)
Arg_1.22548 = bf16[] parameter(1)
ROOT compare.22551 = pred[] compare(Arg_0.22547, Arg_1.22548), direction=GT, type=TOTALORDER
}
ENTRY %entry {
%p0 = bf16[64,256000]{1,0} parameter(0), sharding={manual}
%custom-call = (bf16[64,40]{1,0}, s32[64,40]{1,0}) custom-call(bf16[64,256000]{1,0} %p0), custom_call_target="TopK", called_computations={%region}, sharding={{manual}, {manual}}
%get-tuple-element.336 = bf16[64,40]{1,0} get-tuple-element((bf16[64,40]{1,0}, s32[64,40]{1,0}) %custom-call), index=0, sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(hlo_string, 2));
VLOG(1) << module->ToString();
EXPECT_EQ(FindInstruction(module.get(), HloOpcode::kSort), nullptr);
auto topk_instruction = FindInstruction(module.get(), HloOpcode::kCustomCall);
EXPECT_EQ(topk_instruction->custom_call_target(), "TopK");
EXPECT_THAT(topk_instruction->operand(0), op::Shape("bf16[64,256000]{1,0}"));
EXPECT_THAT(topk_instruction,
op::Shape("(bf16[64,40]{1,0}, s32[64,40]{1,0})"));
}
TEST_P(SpmdPartitioningTest, WindowedEinsumShouldMatchLhs_b305313406) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %entry {
%copy.11 = bf16[64,2048,20480]{2,1,0} parameter(0), sharding={devices=[8,1,4]<=[32]}
%reshape.44 = bf16[20480,65536]{1,0} parameter(1), sharding={devices=[4,4,2]0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23,8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31 last_tile_dim_replicate}
ROOT %dot.339 = bf16[64,2048,65536]{2,1,0} dot(bf16[64,2048,20480]{2,1,0} %copy.11, bf16[20480,65536]{1,0} %reshape.44), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[8,1,4]<=[32]}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 32,
true,
true,
false,
true,
-1));
XLA_VLOG_LINES(1, module->ToString());
const auto collective_permute =
AllOf(op::CollectivePermute(), op::Shape("bf16[8,2048,1,5120]"));
const auto broadcast =
AllOf(op::Broadcast(), op::Shape("bf16[8,2048,16384]"));
const auto all_reduce =
AllOf(op::AllReduce(), op::Shape("bf16[20480,16384]"));
const auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::While(op::Tuple(
op::Reshape(), all_reduce, op::Broadcast(),
collective_permute, op::Constant()))),
op::Shape("bf16[8,2048,16384]")));
}
TEST_P(SpmdPartitioningTest, ComplexReshapeReshard) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %extracted_computation (param: f32[13,128,312,16,312]) -> f32[13,39936,4992] {
%param = f32[13,128,312,16,312]{4,2,3,1,0} parameter(0)
%copy.1261 = f32[13,128,312,16,312]{4,3,2,1,0} copy(f32[13,128,312,16,312]{4,2,3,1,0} %param), sharding={devices=[1,32,1,2,1,2]<=[2,64]T(1,0) last_tile_dim_replicate}
%reshape.27217 = f32[13,39936,4992]{2,1,0} reshape(f32[13,128,312,16,312]{4,3,2,1,0} %copy.1261), sharding={devices=[1,2,32,2]<=[2,32,2]T(2,1,0) last_tile_dim_replicate}
%copy.1260 = f32[13,39936,4992]{2,1,0} copy(f32[13,39936,4992]{2,1,0} %reshape.27217), sharding={devices=[1,2,32,2]<=[2,32,2]T(2,1,0) last_tile_dim_replicate}
ROOT %copy = f32[13,39936,4992]{2,1,0} copy(f32[13,39936,4992]{2,1,0} %copy.1260)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(hlo_string, 128,
true,
true,
false,
true,
-1));
XLA_VLOG_LINES(1, module->ToString());
auto all_to_all = FindInstruction(module.get(), HloOpcode::kAllToAll);
EXPECT_NE(all_to_all, nullptr);
}
TEST_P(SpmdPartitioningTest, SortAllGatherNonMovableDimension) {
const char* const hlo_string = R"(
HloModule module
top_k_gt_f32_comparator_64.35303 {
Arg_2.35306 = s32[] parameter(2)
Arg_3.35307 = s32[] parameter(3)
Arg_0.35304 = f32[] parameter(0)
Arg_1.35305 = f32[] parameter(1)
ROOT compare.35308 = pred[] compare(Arg_0.35304, Arg_1.35305), direction=GT
}
ENTRY entry {
param.0 = f32[4,16384,4096]{2,1,0} parameter(0), sharding={devices=[4,4,4]<=[64]}
param.1 = s32[4,16384,4096]{2,1,0} parameter(1), sharding={devices=[4,4,4]<=[64]}
ROOT sort.209 = (f32[4,16384,4096]{2,1,0}, s32[4,16384,4096]{2,1,0}) sort(param.0, param.1), dimensions={2}, to_apply=top_k_gt_f32_comparator_64.35303, sharding={{devices=[4,4,4]<=[64]}, {devices=[4,4,4]<=[64]}}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(
hlo_string, 64,
true,
true));
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
auto* sort = FindInstruction(module.get(), HloOpcode::kSort);
EXPECT_THAT(
root,
AllOf(op::Tuple(),
op::Shape("(f32[1,4096,1024]{2,1,0}, s32[1,4096,1024]{2,1,0})")));
EXPECT_THAT(
sort,
AllOf(op::Sort(
AllOf(op::AllReduce(), op::Shape("f32[1,4096,4096]{2,1,0}")),
AllOf(op::AllReduce(), op::Shape("s32[1,4096,4096]{2,1,0}"))),
op::Shape("(f32[1,4096,4096]{2,1,0}, s32[1,4096,4096]{2,1,0})")));
}
TEST_P(SpmdPartitioningTest, PartitionOffloading) {
const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1,256,128]{2,1,0})->f32[1,256,128]{2,1,0}}
ENTRY offloading (param0: f32[1,256,128]) -> f32[1,256,128] {
zero = f32[] constant(0), sharding={replicated}
broadcast = f32[256,256,128]{2,1,0} broadcast(zero), dimensions={}, sharding={devices=[1,1,4]0,1,2,3}
param0 = f32[1,256,128]{2,1,0} parameter(0), sharding={devices=[1,1,4]0,1,2,3}
move-to-host = f32[1,256,128]{2,1,0} custom-call(param0), custom_call_target="MoveToHost", sharding={devices=[1,1,4]0,1,2,3}
izero = s32[] constant(0)
dynamic-update-slice = f32[256,256,128]{2,1,0} dynamic-update-slice(broadcast, move-to-host, izero, izero, izero), sharding={devices=[1,1,4]0,1,2,3}
dynamic-slice = f32[1,256,128]{2,1,0} dynamic-slice(dynamic-update-slice, izero, izero, izero), dynamic_slice_sizes={1,256,128}, sharding={devices=[1,1,4]0,1,2,3}
move-to-device = f32[1,256,128]{2,1,0} custom-call(dynamic-slice), custom_call_target="MoveToDevice", sharding={devices=[1,4,1]0,1,2,3}
ROOT copy = f32[1,256,128]{2,1,0} copy(move-to-device), sharding={devices=[1,4,1]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
PartitionComputation(
hlo_string, 4,
true,
true));
XLA_VLOG_LINES(1, module->ToString());
auto move_to_host = FindInstruction(module.get(), "move-to-host.1");
auto move_to_device = FindInstruction(module.get(), "move-to-device.1");
EXPECT_EQ(
FindInstruction(module.get(), HloOpcode::kDynamicUpdateSlice)->operand(1),
move_to_host);
EXPECT_EQ(move_to_device->operand(0)->opcode(), HloOpcode::kDynamicSlice);
EXPECT_THAT(move_to_host, op::Shape("f32[1,256,32]"));
EXPECT_THAT(move_to_device, op::Shape("f32[1,256,32]"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d229d95d-83c7-4da2-964e-3b50e74751e5 | cpp | tensorflow/tensorflow | canonicalize_all_gather_for_cse | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse.cc | third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse_test.cc | #include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
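// Rewrites an all-gather whose operand is only reshaped by inserting or
// deleting size-1 dimensions: the new all-gather consumes the original data
// (with a remapped all-gather dimension) and a reshape is applied to its
// result. This makes otherwise-equivalent all-gathers identical so they can
// be CSE'd.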
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::RunOnComputation(
HloComputation* comp) {
bool changed = false;
std::vector<HloInstruction*> ordered_hlos = comp->MakeInstructionPostOrder();
for (HloInstruction* hlo : ordered_hlos) {
HloAllGatherInstruction* ag = DynCast<HloAllGatherInstruction>(hlo);
if (!ag || ag->operand_count() > 1) {
continue;
}
HloInstruction* real_data = ag->mutable_operand(0);
while (real_data->ReshapeMerelyInsertsOrDeletes1SizedDimensions()
.has_value()) {
real_data = real_data->mutable_operand(0);
}
if (real_data == ag->operand(0)) {
continue;
}
const int64_t ag_dim = ag->all_gather_dimension();
int64_t new_ag_dim;
if (auto dims = ShapeUtil::ReshapeLeavesDimensionsUnmodified(
ag->operand(0)->shape(), real_data->shape(), {ag_dim})) {
new_ag_dim = dims->at(0);
} else {
int64_t major_elements =
Product(absl::MakeConstSpan(ag->operand(0)->shape().dimensions())
.subspan(0, ag_dim));
new_ag_dim = 0;
while (major_elements > 1) {
major_elements /= real_data->shape().dimensions(new_ag_dim++);
}
}
if (new_ag_dim == real_data->shape().rank()) {
continue;
}
const int64_t all_gather_participants =
ShapeUtil::ElementsIn(ag->shape()) /
ShapeUtil::ElementsIn(ag->operand(0)->shape());
Shape new_ag_shape = real_data->shape();
new_ag_shape.set_dimensions(
new_ag_dim,
all_gather_participants * new_ag_shape.dimensions(new_ag_dim));
std::optional<int64_t> new_channel_id =
ag->channel_id() ? std::make_optional(this->NextChannelId())
: std::nullopt;
HloInstruction* new_ag =
comp->AddInstruction(HloInstruction::CreateAllGather(
new_ag_shape, {real_data}, new_ag_dim,
ag->device_list(), ag->constrain_layout(), new_channel_id,
ag->use_global_device_ids()));
ag->SetupDerivedInstruction(new_ag);
HloInstruction* new_formatting = comp->AddInstruction(
HloInstruction::CreateReshape(ag->shape(), new_ag));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(ag, new_formatting));
changed = true;
}
return changed;
}
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
next_channel_id_ = hlo_query::NextChannelId(*module);
for (HloComputation* comp : module->computations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class AllGatherCanonicalizeTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
hlo_module, GetModuleConfigForTest()));
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<CanonicalizeAllGatherForCSE>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(AllGatherCanonicalizeTest, SimpleReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
ROOT ag = s32[2,8]{1,0} all-gather(resh), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape,
AllOf(op::Reshape(op::AllGather(_)), op::Shape("s32[2,8]")));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[1,8]{1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapesNoDim0) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
ROOT ag = s32[1,16,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={1}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, NonDegenerateReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,4,2,1,1]{4,3,2,1,0} reshape(resh)
ROOT ag = s32[2,4,2,1,1]{4,3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, AllOf(op::AllGather(op::Reshape(op::Reshape(_))),
op::Shape("s32[2,4,2,1,1]")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/canonicalize_all_gather_for_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f912456d-8bbf-4712-ae1d-caa5d3675f31 | cpp | tensorflow/tensorflow | spmd_partitioner_util | third_party/xla/xla/service/spmd/spmd_partitioner_util.cc | third_party/xla/xla/service/spmd/spmd_partitioner_util_test.cc | #include "xla/service/spmd/spmd_partitioner_util.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
namespace {
using hlo_sharding_util::GroupedSharding;
}
bool HasReplicatedSharding(const HloSharding& sharding) {
if (sharding.IsTuple()) {
return absl::c_any_of(sharding.tuple_elements(), HasReplicatedSharding);
}
return sharding.IsReplicated();
}
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
HloComputation::Builder sum_b("add");
auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
if (type == PRED) {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));
} else {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));
}
HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());
return reduction;
}
bool EvenlyPartitions(const Shape& shape, const HloSharding& sharding) {
if (sharding.IsTuple()) {
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
if (!EvenlyPartitions(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}))) {
return false;
}
}
}
if (sharding.IsTileMaximal()) {
return sharding.IsReplicated();
}
for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
return false;
}
}
return true;
}
Shape MakePartitionedShape(const Shape& shape, const HloSharding& sharding) {
if (sharding.IsTuple()) {
std::vector<Shape> subshapes;
const int64_t shape_n = ShapeUtil::TupleElementCount(shape);
subshapes.reserve(shape_n);
for (int64_t i = 0; i < shape_n; ++i) {
subshapes.push_back(
MakePartitionedShape(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i})));
}
return ShapeUtil::MakeTupleShape(subshapes);
}
return sharding.TileShape(shape);
}
int64_t ShapeSizeInBytes(const Shape& shape) {
if (shape.IsTuple()) {
int64_t total_size = 0;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
total_size += ShapeSizeInBytes(ShapeUtil::GetTupleElementShape(shape, i));
}
return total_size;
}
return ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type()) *
ShapeUtil::ElementsIn(shape);
}
Shape MakeNonPaddedShapeForGivenPartition(const Shape& shape,
const HloSharding& sharding,
int64_t partition_id) {
if (sharding.IsTuple()) {
std::vector<Shape> subshapes;
const int64_t shape_n = ShapeUtil::TupleElementCount(shape);
subshapes.reserve(shape_n);
for (int64_t i = 0; i < shape_n; ++i) {
subshapes.push_back(MakeNonPaddedShapeForGivenPartition(
ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}), partition_id));
}
return ShapeUtil::MakeTupleShape(subshapes);
}
if (sharding.IsReplicated()) {
return shape;
}
if (sharding.IsTileMaximal()) {
if (partition_id == *sharding.UniqueDevice()) {
return shape;
}
return ShapeUtil::MakeTupleShape({});
}
auto partition_shape = shape;
std::vector<int64_t> tile_offset =
sharding.TileOffsetForDevice(shape, partition_id);
std::vector<int64_t> tile_limit =
sharding.TileLimitForDevice(shape, partition_id);
for (int64_t i = 0; i < tile_offset.size(); ++i) {
if (sharding.UsesDevice(partition_id)) {
partition_shape.set_dimensions(i, tile_limit[i] - tile_offset[i]);
} else {
partition_shape.set_dimensions(i, 0);
}
}
return partition_shape;
}
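// Returns one S32 scalar per dimension of `shape` giving the offset of the
// current partition's shard within the full shape. Unsharded dimensions (and
// dimensions not listed in `dims`, when `dims` is non-empty) get offset zero;
// sharded dimensions are computed with a table lookup on `partition_id`.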
std::vector<HloInstruction*> MakePartitionOffsets(
const Shape& shape, const HloSharding& sharding,
HloInstruction* partition_id, SpmdBuilder* b,
absl::Span<const int64_t> dims) {
CHECK(!shape.IsTuple());
auto shard_shape = MakePartitionedShape(shape, sharding);
std::vector<HloInstruction*> offsets;
for (int64_t i = 0; i < shape.rank(); ++i) {
if (sharding.tile_assignment().dim(i) == 1 ||
(!dims.empty() && !absl::c_linear_search(dims, i))) {
offsets.push_back(b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32))));
} else {
std::vector<int32_t> offset_array(
sharding.tile_assignment().num_elements());
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
offset_array[device] = indices[i] * shard_shape.dimensions(i);
});
offsets.push_back(
TableLookup<int32_t>(offset_array, S32, partition_id, b));
}
}
return offsets;
}
std::vector<HloInstruction*> MakeTiledPartitionOrdinals(
const HloSharding& sharding, HloInstruction* partition_id, SpmdBuilder* b) {
CHECK(!sharding.IsTileMaximal());
auto dimensions = sharding.tile_assignment().dimensions();
if (sharding.ReplicateOnLastTileDim()) {
dimensions.remove_suffix(1);
}
auto table_shape = ShapeUtil::MakeShape(S32, dimensions);
return MakePartitionOffsets(table_shape, sharding, partition_id, b);
}
Shape GetPaddedShapeForUnevenPartitioning(const Shape& base_shape,
const HloSharding& sharding) {
if (sharding.IsTileMaximal()) {
return base_shape;
}
if (EvenlyPartitions(base_shape, sharding)) {
return base_shape;
}
auto shard_shape = MakePartitionedShape(base_shape, sharding);
Shape padded_base_shape = base_shape;
for (int64_t i = 0; i < padded_base_shape.rank(); ++i) {
padded_base_shape.set_dimensions(
i, shard_shape.dimensions(i) * sharding.tile_assignment().dim(i));
}
return padded_base_shape;
}
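// Returns the partition id of the current device within its device group,
// given the global `partition_id` and the list of device groups.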
HloInstruction* GetInGroupPartitionId(
HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b) {
int64_t total_devices = device_groups.size() * device_groups[0].size();
std::vector<uint32_t> in_group_ids(total_devices);
for (uint32_t i = 0; i < device_groups.size(); ++i) {
for (uint32_t j = 0; j < device_groups[i].size(); ++j) {
in_group_ids[device_groups[i][j]] = j;
}
}
return TableLookup<uint32_t>(in_group_ids, U32, partition_id, b);
}
namespace {
bool IsIota(absl::Span<const int64_t> x) {
for (int64_t i = 0; i < x.size(); ++i) {
if (x[i] != i) {
return false;
}
}
return true;
}
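// Wraps `creator` so that the collectives it creates operate independently
// within each device group: the partition id is remapped to the in-group id,
// and partition subgroups are expanded to the corresponding global device ids
// of every group.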
SPMDCollectiveOpsCreator GetPerGroupCollectiveOpsCreator(
const SPMDCollectiveOpsCreator& creator,
const std::vector<std::vector<int64_t>>& device_groups) {
if (device_groups.size() == 1 && IsIota(device_groups[0])) {
return creator;
}
SPMDCollectiveOpsCreator result;
auto device_groups_ptr =
std::make_shared<const std::vector<std::vector<int64_t>>>(device_groups);
result.create_partition_id = [creator, device_groups_ptr](SpmdBuilder* b) {
return GetInGroupPartitionId(creator.create_partition_id(b),
*device_groups_ptr, b);
};
auto expand_partition_groups =
[device_groups_ptr](
const std::vector<std::vector<int64_t>>& partition_subgroups) {
auto& device_groups = *device_groups_ptr;
if (partition_subgroups.empty()) {
return device_groups;
}
std::vector<std::vector<int64_t>> result(partition_subgroups.size() *
device_groups.size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t i = 0; i < partition_subgroups.size(); ++i) {
result[g * partition_subgroups.size() + i].resize(
partition_subgroups[i].size());
for (int64_t j = 0; j < partition_subgroups[i].size(); ++j) {
result[g * partition_subgroups.size() + i][j] =
device_groups[g][partition_subgroups[i][j]];
}
}
}
return result;
};
result.create_cross_partition_all_reduce =
[creator, expand_partition_groups](
SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id) {
return creator.create_cross_partition_all_reduce(
b, operand, reduction, expand_partition_groups(partition_subgroups),
channel_id);
};
result.create_cross_partition_collective_permute =
[creator, device_groups_ptr](
SpmdBuilder* b, HloInstruction* operand,
std::vector<std::pair<int64_t, int64_t>>& src_dst_pairs,
int64_t next_channel_id) {
auto& device_groups = *device_groups_ptr;
std::vector<std::pair<int64_t, int64_t>> expanded_pairs(
src_dst_pairs.size() * device_groups.size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t i = 0; i < src_dst_pairs.size(); ++i) {
expanded_pairs[g * src_dst_pairs.size() + i] =
std::pair<int64_t, int64_t>{
device_groups[g][src_dst_pairs[i].first],
device_groups[g][src_dst_pairs[i].second]};
}
}
return creator.create_cross_partition_collective_permute(
b, operand, expanded_pairs, next_channel_id);
};
result.create_cross_partition_all_to_all =
[creator, expand_partition_groups](
SpmdBuilder* b, absl::Span<HloInstruction* const> operands,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, std::optional<int64_t> split_dimension) {
return creator.create_cross_partition_all_to_all(
b, operands, expand_partition_groups(partition_subgroups),
channel_id, split_dimension);
};
if (creator.create_cross_partition_all_gather) {
result.create_cross_partition_all_gather =
[creator, expand_partition_groups](
SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, int64_t all_gather_dimension) {
return creator.create_cross_partition_all_gather(
b, operand, ag_shape,
expand_partition_groups(partition_subgroups), channel_id,
all_gather_dimension);
};
}
return result;
}
}
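// Given a partially replicated `partial_sharding` and a `target_sharding`
// that uses the same devices and has the same tiled rank, returns a sharding
// with the target's per-dimension tile counts formed by distributing the
// partial sharding's replication dimension onto the tile dimensions, so that
// the reshard can be done with local slicing. Returns std::nullopt if the
// target tile counts are not multiples of the partial sharding's tile counts.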
std::optional<HloSharding> PartialReplicateReshardCompatibleSharding(
const HloSharding& partial_sharding, const HloSharding& target_sharding) {
if (!partial_sharding.ReplicateOnLastTileDim()) {
return std::nullopt;
}
if (partial_sharding.tile_assignment().num_elements() !=
target_sharding.tile_assignment().num_elements()) {
return std::nullopt;
}
const int64_t rank = partial_sharding.TiledDataRank();
if (rank != target_sharding.TiledDataRank()) {
return std::nullopt;
}
std::vector<int64_t> expand_tile_dims_indices(rank, -1);
std::vector<int64_t> expand_tile_sizes;
int64_t num_expand_dims = 0;
for (int64_t dim = 0; dim < rank; dim++) {
int64_t partial_tile_size = partial_sharding.tile_assignment().dim(dim);
int64_t target_tile_size = target_sharding.tile_assignment().dim(dim);
if (target_tile_size % partial_tile_size != 0) {
return std::nullopt;
}
if (target_tile_size > partial_tile_size) {
expand_tile_dims_indices[dim] = num_expand_dims++;
expand_tile_sizes.emplace_back(target_tile_size / partial_tile_size);
}
}
const std::vector<int64_t> shape_dims(
target_sharding.tile_assignment().dimensions().begin(),
target_sharding.tile_assignment().dimensions().begin() + rank);
if (hlo_sharding_util::IsSubTilingOrEqualSharding(
ShapeUtil::MakeShape(F32, shape_dims), target_sharding,
partial_sharding)) {
return target_sharding;
}
std::vector<int64_t> reshape_dimensions(
partial_sharding.tile_assignment().dimensions().begin(),
partial_sharding.tile_assignment().dimensions().begin() + rank);
reshape_dimensions.insert(reshape_dimensions.end(), expand_tile_sizes.begin(),
expand_tile_sizes.end());
std::vector<int> perm;
perm.reserve(rank + expand_tile_sizes.size());
for (int64_t dim = 0; dim < rank; dim++) {
perm.emplace_back(dim);
if (expand_tile_dims_indices[dim] > -1) {
perm.emplace_back(expand_tile_dims_indices[dim] + rank);
}
}
if (target_sharding.ReplicateOnLastTileDim()) {
reshape_dimensions.push_back(
target_sharding.tile_assignment().dimensions().back());
perm.push_back(reshape_dimensions.size() - 1);
}
auto transpose_tile_assignment =
partial_sharding.tile_assignment()
.Reshape(reshape_dimensions)
.Transpose(perm)
.Reshape(target_sharding.tile_assignment().dimensions());
return target_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(transpose_tile_assignment)
: HloSharding::Tile(transpose_tile_assignment);
}
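// Performs the halo exchange needed along `replicate_dims` when `hlo` is
// resharded from the tiled `src_sharding` to the partially replicated
// `dst_sharding`, accounting for the difference between the two shardings'
// padded shapes. May return `hlo` unchanged if no exchange is needed.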
std::optional<HloInstruction*> TileToPartialReplicateHaloExchange(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& replicate_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b) {
auto padded_src_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, src_sharding);
auto padded_dst_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, dst_sharding);
if (ShapeUtil::Compatible(padded_dst_shape, hlo->shape())) {
return hlo;
}
auto partition_ordinals =
MakeTiledPartitionOrdinals(src_sharding, partition_id, b);
auto result = hlo;
auto hlo_shape = hlo->shape();
for (auto dim : replicate_dims) {
int64_t src_shard_count = src_sharding.tile_assignment().dim(dim);
int64_t dst_shard_count = dst_sharding.tile_assignment().dim(dim);
int64_t src_per_dst_shard_size =
padded_src_shape.dimensions(dim) / dst_shard_count;
int64_t dst_per_shard_size =
padded_dst_shape.dimensions(dim) / dst_shard_count;
if (src_per_dst_shard_size <= dst_per_shard_size || dst_shard_count == 1) {
continue;
}
int64_t replicate_factor = src_shard_count / dst_shard_count;
OffsetCalculation left_halo_size_function = OffsetCalculation(
HloOpcode::kMultiply,
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
0, src_per_dst_shard_size - dst_per_shard_size, 1)),
OffsetCalculation(
MultiplyAddDivideOffsetCalculation(1, 0, replicate_factor)));
OffsetCalculation right_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(0, 0, 1)) -
left_halo_size_function;
result = ExchangeHaloCompact(result, base_shape, left_halo_size_function,
right_halo_size_function, nullptr, dim,
src_sharding, partition_ordinals[dim],
collective_ops_creator, next_channel_id, b);
}
return result;
}
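// Adjusts `hlo` along `expand_tile_dims` when resharding from a partially
// replicated sharding to a more tiled one: shards exchange halos (or are
// simply padded, for dimensions the source does not shard) so that each shard
// covers the destination's padded per-shard size.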
std::optional<HloInstruction*> PadFromPartialReplicateShape(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& expand_tile_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b) {
auto padded_src_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, src_sharding);
auto padded_dst_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, dst_sharding);
if (ShapeUtil::Compatible(padded_dst_shape, hlo->shape())) {
return hlo;
}
auto partition_ordinals =
MakeTiledPartitionOrdinals(src_sharding, partition_id, b);
HloInstruction* result = hlo;
auto zero = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(hlo->shape().element_type())));
std::vector<int64_t> expand_dims_without_halo_exchange;
for (auto dim : expand_tile_dims) {
int64_t src_shard_count = src_sharding.tile_assignment().dim(dim);
int64_t src_per_shard_size =
padded_src_shape.dimensions(dim) / src_shard_count;
int64_t dst_per_shard_size =
padded_dst_shape.dimensions(dim) / src_shard_count;
if (src_per_shard_size >= dst_per_shard_size) {
continue;
}
if (src_shard_count == 1) {
expand_dims_without_halo_exchange.emplace_back(dim);
continue;
}
OffsetCalculation left_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
src_per_shard_size - dst_per_shard_size, 0, 1));
OffsetCalculation right_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
dst_per_shard_size - src_per_shard_size,
dst_per_shard_size - src_per_shard_size, 1));
result = ExchangeHaloCompact(result, base_shape, left_halo_size_function,
right_halo_size_function, nullptr, dim,
src_sharding, partition_ordinals[dim],
collective_ops_creator, next_channel_id, b);
}
if (!expand_dims_without_halo_exchange.empty()) {
std::vector<int64_t> zero_padding(result->shape().rank());
PaddingConfig pad_config = window_util::MakeSymmetricPadding(zero_padding);
auto padded_shape = result->shape();
for (auto dim : expand_dims_without_halo_exchange) {
pad_config.mutable_dimensions(dim)->set_edge_padding_low(0);
pad_config.mutable_dimensions(dim)->set_edge_padding_high(
padded_dst_shape.dimensions(dim) - padded_src_shape.dimensions(dim));
padded_shape.set_dimensions(dim, result->shape().dimensions(dim) +
padded_dst_shape.dimensions(dim) -
padded_src_shape.dimensions(dim));
}
result = b->AddInstruction(
HloInstruction::CreatePad(padded_shape, result, zero, pad_config));
}
return result;
}
std::optional<int64_t> UniqueTiledDim(const HloSharding& sharding) {
if (sharding.IsTileMaximal()) {
return std::nullopt;
}
int64_t dim = -1;
int64_t rank = sharding.ReplicateOnLastTileDim()
? sharding.tile_assignment().num_dimensions() - 1
: sharding.tile_assignment().num_dimensions();
for (int64_t i = 0; i < rank; ++i) {
if (sharding.tile_assignment().dim(i) > 1) {
if (dim != -1) {
return std::nullopt;
}
dim = i;
}
}
CHECK_NE(dim, -1);
return dim;
}
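// Offset function of the form (shard_ordinal * multiplier + offset) / divisor.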
MultiplyAddDivideOffsetCalculation::MultiplyAddDivideOffsetCalculation(
int64_t multiplier, int64_t offset, int64_t divisor)
: multiplier_(multiplier), offset_(offset), divisor_(divisor) {
CHECK_GT(divisor_, 0);
Simplify();
}
OffsetCalculation MultiplyAddDivideOffsetCalculation::operator-(
const MultiplyAddDivideOffsetCalculation& other) const {
if (divisor_ == 1 && other.divisor_ == 1) {
return OffsetCalculation(MultiplyAddDivideOffsetCalculation(
multiplier_ - other.multiplier_, offset_ - other.offset_, 1));
}
return OffsetCalculation(HloOpcode::kSubtract, *this, other);
}
OffsetCalculation MultiplyAddDivideOffsetCalculation::operator+(
const MultiplyAddDivideOffsetCalculation& other) const {
if (divisor_ == 1 && other.divisor_ == 1) {
return OffsetCalculation(MultiplyAddDivideOffsetCalculation(
multiplier_ + other.multiplier_, offset_ + other.offset_, 1));
}
return OffsetCalculation(HloOpcode::kAdd, *this, other);
}
void MultiplyAddDivideOffsetCalculation::Simplify() {
if (divisor_ != 1 && multiplier_ % divisor_ == 0 &&
(offset_ % divisor_ == 0 || offset_ * multiplier_ > 0)) {
multiplier_ /= divisor_;
offset_ /= divisor_;
divisor_ = 1;
}
}
int64_t MultiplyAddDivideOffsetCalculation::Calculate(
int64_t shard_ordinal) const {
return (shard_ordinal * multiplier_ + offset_) / divisor_;
}
HloInstruction* MultiplyAddDivideOffsetCalculation::Calculate(
HloInstruction* shard_ordinal, SpmdBuilder* b) const {
auto scalar_shape = ShapeUtil::MakeShape(S32, {});
if (multiplier_ == 0) {
return b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(offset_ / divisor_)));
}
HloInstruction* result = shard_ordinal;
if (multiplier_ != 1) {
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, shard_ordinal,
b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(multiplier_)))));
}
if (offset_ != 0) {
auto offset = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(offset_)));
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, result, offset));
}
if (divisor_ != 1) {
auto divisor = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(divisor_)));
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kDivide, result, divisor));
}
return result;
}
int64_t MultiplyAddDivideOffsetCalculation::MaxInRange(
int64_t start_ordinal, int64_t limit_ordinal) const {
int64_t max = Calculate(start_ordinal);
for (int64_t i = start_ordinal + 1; i < limit_ordinal; ++i) {
max = std::max(max, Calculate(i));
}
return max;
}
OffsetCalculation& OffsetCalculation::operator=(
const OffsetCalculation& other) {
opcode_ = other.opcode_;
copy_from_ = other.copy_from_;
if (opcode_ != HloOpcode::kCopy) {
lhs_ = std::make_unique<OffsetCalculation>(*other.lhs_);
rhs_ = std::make_unique<OffsetCalculation>(*other.rhs_);
}
return *this;
}
bool OffsetCalculation::IsConstant() const {
if (opcode_ == HloOpcode::kCopy) {
return copy_from_.IsConstant();
}
if (opcode_ == HloOpcode::kSubtract && *lhs_ == *rhs_) {
return true;
}
return lhs_->IsConstant() && rhs_->IsConstant();
}
OffsetCalculation OffsetCalculation::operator-(
const OffsetCalculation& other) const {
if (opcode_ == HloOpcode::kCopy && other.opcode_ == HloOpcode::kCopy) {
return copy_from_ - other.copy_from_;
}
return OffsetCalculation(HloOpcode::kSubtract, *this, other);
}
OffsetCalculation OffsetCalculation::operator+(
const OffsetCalculation& other) const {
if (opcode_ == HloOpcode::kCopy && other.opcode_ == HloOpcode::kCopy) {
return copy_from_ + other.copy_from_;
}
return OffsetCalculation(HloOpcode::kAdd, *this, other);
}
bool OffsetCalculation::operator==(const OffsetCalculation& other) const {
if (opcode_ != other.opcode_) {
return false;
}
if (opcode_ == HloOpcode::kCopy) {
return copy_from_ == other.copy_from_;
}
return *lhs_ == *other.lhs_ && *rhs_ == *other.rhs_;
}
int64_t OffsetCalculation::Calculate(int64_t shard_ordinal) const {
switch (opcode_) {
case HloOpcode::kAdd:
return lhs_->Calculate(shard_ordinal) + rhs_->Calculate(shard_ordinal);
case HloOpcode::kCopy:
return copy_from_.Calculate(shard_ordinal);
case HloOpcode::kSubtract:
return lhs_->Calculate(shard_ordinal) - rhs_->Calculate(shard_ordinal);
case HloOpcode::kMultiply:
return lhs_->Calculate(shard_ordinal) * rhs_->Calculate(shard_ordinal);
default:
LOG(FATAL) << "Should not happen";
}
}
HloInstruction* OffsetCalculation::Calculate(HloInstruction* shard_ordinal,
SpmdBuilder* b) const {
if (opcode_ == HloOpcode::kCopy) {
return copy_from_.Calculate(shard_ordinal, b);
}
auto lhs = lhs_->Calculate(shard_ordinal, b);
auto rhs = rhs_->Calculate(shard_ordinal, b);
return b->AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), opcode_, lhs, rhs));
}
int64_t OffsetCalculation::MaxInRange(int64_t start_ordinal,
int64_t limit_ordinal) const {
if (IsConstant()) {
return Calculate(start_ordinal);
}
if (opcode_ == HloOpcode::kCopy) {
return std::max(Calculate(start_ordinal), Calculate(limit_ordinal - 1));
}
int64_t max = Calculate(start_ordinal);
for (int64_t i = start_ordinal + 1; i < limit_ordinal; ++i) {
max = std::max(max, Calculate(i));
}
return max;
}
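// Exchanges halo data along `dim` with neighboring shards via
// collective-permutes and concatenates the received left halos, the needed
// part of the local shard, and the received right halos. Returns std::nullopt
// when the requested halos cannot be handled here (an empty result window, or
// halos wider than a full shard while the window spans all shards).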
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo, const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function, int64_t dim,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
int64_t input_shard_size = hlo->shape().dimensions(dim);
int64_t shard_count = target.tile_assignment().dim(dim);
std::vector<HloInstruction*> concat_pieces;
int64_t max_left_halo_size =
left_halo_size_function.MaxInRange(1, shard_count);
int64_t max_right_halo_size =
right_halo_size_function.MaxInRange(0, shard_count - 1);
if (max_left_halo_size + max_right_halo_size + input_shard_size >=
input_shard_size * shard_count &&
(max_left_halo_size > input_shard_size ||
max_right_halo_size > input_shard_size)) {
return std::nullopt;
}
const int64_t left_bound =
-left_halo_size_function.MaxInRange(0, shard_count);
const int64_t right_bound =
input_shard_size + right_halo_size_function.MaxInRange(0, shard_count);
if (left_bound >= right_bound) {
return std::nullopt;
}
for (int64_t i = CeilOfRatio(max_left_halo_size, input_shard_size) - 1;
i >= 0 && (-i - 1) * input_shard_size < right_bound; --i) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
target.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
if (indices[dim] > i) {
std::vector<int64_t> source_indices(indices.begin(), indices.end());
source_indices[dim] -= i + 1;
source_target_pairs.emplace_back(
target.tile_assignment()(source_indices), device);
}
});
int64_t halo_size_including_skips =
std::min(max_left_halo_size - input_shard_size * i, input_shard_size);
int64_t halo_right_skips =
std::max<int64_t>(-i * input_shard_size - right_bound, 0);
int64_t halo_size = halo_size_including_skips - halo_right_skips;
auto halo_shape = hlo->shape();
auto source_halo_slice = hlo;
if (halo_size != hlo->shape().dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] =
hlo->shape().dimensions(dim) - halo_size_including_skips;
std::vector<int64_t> halo_limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
halo_limit_indices[dim] -= halo_right_skips;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
auto left_halo =
collective_ops_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
concat_pieces.push_back(left_halo);
}
if (left_bound < input_shard_size && right_bound > 0) {
int64_t self_start = std::max<int64_t>(0, left_bound);
int64_t self_limit = std::min<int64_t>(input_shard_size, right_bound);
if (self_start == 0 && self_limit == input_shard_size) {
concat_pieces.push_back(hlo);
} else {
auto self_shape = hlo->shape();
self_shape.set_dimensions(dim, self_limit - self_start);
std::vector<int64_t> start_indices(self_shape.rank(), 0);
start_indices[dim] = self_start;
std::vector<int64_t> limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
limit_indices[dim] = self_limit;
std::vector<int64_t> slice_strides(self_shape.rank(), 1);
concat_pieces.push_back(b->AddInstruction(HloInstruction::CreateSlice(
self_shape, hlo, start_indices, limit_indices, slice_strides)));
}
}
int64_t skipped_right_halos =
std::min<int64_t>(std::max<int64_t>(left_bound - input_shard_size, 0),
std::max<int64_t>(max_right_halo_size, 0)) /
input_shard_size;
for (int64_t i = skipped_right_halos;
i < CeilOfRatio(max_right_halo_size, input_shard_size); ++i) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
target.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
if (indices[dim] > i) {
std::vector<int64_t> target_indices(indices.begin(), indices.end());
target_indices[dim] -= i + 1;
source_target_pairs.emplace_back(
device, target.tile_assignment()(target_indices));
}
});
int64_t halo_size_including_skips =
std::min(max_right_halo_size - input_shard_size * i, input_shard_size);
int64_t halo_left_skips =
std::max<int64_t>(left_bound - (i + 1) * input_shard_size, 0);
int64_t halo_size = halo_size_including_skips - halo_left_skips;
auto halo_shape = hlo->shape();
HloInstruction* source_halo_slice = hlo;
if (halo_size != halo_shape.dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] = halo_left_skips;
std::vector<int64_t> halo_limit_indices(halo_shape.dimensions().begin(),
halo_shape.dimensions().end());
halo_limit_indices[dim] += halo_left_skips;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
auto right_halo =
collective_ops_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
concat_pieces.push_back(right_halo);
}
auto concat = concat_pieces[0];
if (concat_pieces.size() > 1) {
auto concat_shape = hlo->shape();
int64_t concat_dim_size = 0;
for (auto piece : concat_pieces) {
concat_dim_size += piece->shape().dimensions(dim);
}
concat_shape.set_dimensions(dim, concat_dim_size);
concat = b->AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, concat_pieces, dim));
}
return concat;
}
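// Like ExchangeHalo, but supports arbitrarily large halos: for each
// destination shard it computes the exact source regions needed, batches them
// into as few collective-permutes as possible within each device group, and
// fills regions that fall outside the base shape with `pad_value` (zero when
// `pad_value` is nullptr).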
HloInstruction* ExchangeHaloCompact(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
HloInstruction* pad_value, int64_t dim, const HloSharding& sharding,
HloInstruction* shard_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
int64_t input_shard_size = hlo->shape().dimensions(dim);
int64_t shard_count = sharding.tile_assignment().dim(dim);
auto grouped =
hlo_sharding_util::GroupShardingOnAllDimsExcept(sharding, {dim});
auto g_creator = GetPerGroupCollectiveOpsCreator(collective_ops_creator,
grouped.device_groups);
const bool ignore_pad_vale = pad_value == nullptr;
if (ignore_pad_vale) {
pad_value = CreateR0WithType(hlo->shape().element_type(), 0, b);
}
  struct Halo {
    // Index of the destination shard whose halo window this piece belongs to.
    int64_t my_index;
    // [start, limit) range of the piece within the source shard.
    int64_t start;
    int64_t limit;
    // Index into the created collective-permutes that delivers this piece, or
    // kPaddingShard / kSelfShard for padding and locally available data.
    int64_t cp_idx;
    // Offset of the piece within the destination shard's halo window.
    int64_t halo_offset;
    // Shard that owns the source data for this piece.
    int64_t halo_at_shard;
  };
std::vector<std::vector<Halo>> halos(shard_count);
constexpr int64_t kPaddingShard = -2;
constexpr int64_t kSelfShard = -1;
int64_t max_window_size = 0;
for (int64_t i = 0; i < shard_count; ++i) {
const int64_t start =
i * input_shard_size - left_halo_size_function.Calculate(i);
int64_t next_start = start;
const int64_t limit =
(i + 1) * input_shard_size + right_halo_size_function.Calculate(i);
max_window_size = std::max(max_window_size, limit - start);
while (next_start < limit) {
Halo& halo = halos[i].emplace_back();
halo.my_index = i;
halo.halo_offset = next_start - start;
halo.start = next_start % input_shard_size;
if (halo.start < 0) {
halo.start += input_shard_size;
}
int64_t size = limit - next_start;
if (next_start < 0 || next_start >= base_shape.dimensions(dim)) {
if (next_start < 0) {
size = std::min(size, 0 - next_start);
}
VLOG(3) << "Halo for shard i " << i << ": pad, size " << size;
halo.limit = halo.start + size;
halo.cp_idx = kPaddingShard;
next_start += size;
continue;
}
size = std::min(input_shard_size - halo.start, size);
halo.limit = halo.start + size;
int64_t shard = next_start / input_shard_size;
halo.halo_at_shard = shard;
halo.cp_idx = kSelfShard;
next_start += size;
VLOG(3) << "Halo for shard i " << i << ": shard " << shard << ", size "
<< size << ", start " << halo.start;
}
}
std::vector<std::vector<std::pair<int64_t, int64_t>>> src_to_dst(shard_count);
{
std::vector<std::vector<Halo>> halos2(shard_count);
std::vector<int64_t> next_halo_idx(halos2.size(), 0);
while (true) {
bool all_padding = true;
bool empty = true;
for (int64_t i = 0; i < halos.size(); ++i) {
if (next_halo_idx[i] >= halos[i].size()) {
continue;
}
if (halos[i][next_halo_idx[i]].cp_idx != kPaddingShard) {
all_padding = false;
}
empty = false;
}
if (empty) {
break;
}
for (int64_t i = 0; i < halos.size(); ++i) {
if (next_halo_idx[i] >= halos[i].size()) {
continue;
}
Halo& h = halos[i][next_halo_idx[i]];
halos2[i].push_back(h);
Halo& new_h = halos2[i].back();
if (!all_padding && h.cp_idx == kPaddingShard &&
h.limit > input_shard_size) {
new_h.limit = input_shard_size;
h.start = 0;
h.limit -= input_shard_size;
VLOG(3) << "Split padding halo for shard i " << i << ": size "
<< new_h.limit - new_h.start;
} else {
next_halo_idx[i] += 1;
}
if (h.cp_idx != kPaddingShard && h.halo_at_shard != i) {
src_to_dst[h.halo_at_shard].emplace_back(i, halos2[i].size() - 1);
}
}
}
halos = std::move(halos2);
}
for (int64_t i = 0; i < src_to_dst.size(); ++i) {
absl::c_stable_sort(src_to_dst[i],
[&](const std::pair<int64_t, int64_t>& a,
const std::pair<int64_t, int64_t>& b) {
return halos[a.first][a.second].halo_offset <
halos[b.first][b.second].halo_offset;
});
}
std::vector<std::pair<HloInstruction*, int64_t>> cps;
std::vector<int64_t> next_dst_idx(src_to_dst.size(), 0);
while (true) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
std::vector<bool> dst_seen(shard_count, false);
int64_t start = input_shard_size;
int64_t limit = 0;
for (int64_t i = 0; i < src_to_dst.size(); ++i) {
if (src_to_dst[i].size() <= next_dst_idx[i]) {
continue;
}
const auto& halo_idx = src_to_dst[i][next_dst_idx[i]];
Halo& halo = halos[halo_idx.first][halo_idx.second];
if (!source_target_pairs.empty() &&
(dst_seen[halo.my_index] ||
(start > halo.limit && limit == input_shard_size &&
halo.start == 0) ||
(limit < halo.start && start == 0 &&
halo.limit == input_shard_size))) {
continue;
}
halo.cp_idx = cps.size();
dst_seen[halo.my_index] = true;
source_target_pairs.emplace_back(i, halo.my_index);
start = std::min(start, halo.start);
limit = std::max(limit, halo.limit);
next_dst_idx[i] += 1;
}
if (source_target_pairs.empty()) {
break;
}
CHECK_LT(start, limit);
const int64_t halo_size = limit - start;
Shape halo_shape = hlo->shape();
HloInstruction* source_halo_slice = hlo;
if (halo_size != hlo->shape().dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] = start;
std::vector<int64_t> halo_limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
halo_limit_indices[dim] = limit;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
HloInstruction* cp = g_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
VLOG(3) << "Halo collective-permute created: " << cp->ToString();
cps.emplace_back(cp, start);
}
std::vector<HloInstruction*> concat_pieces;
Shape concat_shape = hlo->shape();
concat_shape.set_dimensions(dim, 0);
int64_t self_piece_start = input_shard_size;
bool all_padding = true;
for (int64_t current_halo_idx = 0; true; ++current_halo_idx) {
int64_t max_size = 0;
constexpr int64_t kUnseen = -5;
std::vector<int64_t> cp_index(halos.size(), kUnseen);
int64_t min_self_start = input_shard_size;
int64_t max_self_limit = 0;
for (int64_t i = 0; i < halos.size(); ++i) {
if (current_halo_idx >= halos[i].size()) {
continue;
}
const Halo& halo = halos[i][current_halo_idx];
cp_index[i] = halo.cp_idx;
if (halo.cp_idx >= 0) {
max_size =
std::max(max_size, cps[cp_index[i]].first->shape().dimensions(dim));
} else if (halo.cp_idx == kSelfShard) {
min_self_start = std::min(min_self_start, halo.start);
max_self_limit = std::max(max_self_limit, halo.limit);
max_size = std::max(max_size, max_self_limit - min_self_start);
} else {
max_size = std::max(max_size, halo.limit - halo.start);
}
}
if (absl::c_all_of(cp_index, [&](int64_t idx) { return idx == kUnseen; })) {
break;
}
min_self_start -= max_size - (max_self_limit - min_self_start);
min_self_start = std::max<int64_t>(min_self_start, 0);
if (current_halo_idx == 0) {
self_piece_start = min_self_start;
}
concat_shape.set_dimensions(dim, max_size + concat_shape.dimensions(dim));
Shape piece_shape = hlo->shape();
piece_shape.set_dimensions(dim, max_size);
HloInstruction* padding = b->AddInstruction(
HloInstruction::CreateBroadcast(piece_shape, pad_value, {}));
std::vector<HloInstruction*> unique_pieces;
std::vector<int64_t> slices_cache(cps.size() + 2, kUnseen);
std::vector<int32_t> piece_index(halos.size());
for (int64_t i = 0; i < halos.size(); ++i) {
HloInstruction* piece;
int64_t cache_idx = cp_index[i];
if (cp_index[i] >= 0) {
all_padding = false;
piece = cps[cp_index[i]].first;
} else if (cp_index[i] == kSelfShard) {
if (hlo->shape().dimensions(dim) == max_size) {
piece = hlo;
} else {
std::vector<int64_t> starts(piece_shape.rank(), 0);
starts[dim] = min_self_start;
std::vector<int64_t> limits(piece_shape.dimensions().begin(),
piece_shape.dimensions().end());
std::vector<int64_t> strides(piece_shape.rank(), 1);
limits[dim] += min_self_start;
piece = b->AddInstruction(HloInstruction::CreateSlice(
piece_shape, hlo, starts, limits, strides));
}
cache_idx = cps.size();
all_padding = false;
} else {
piece = padding;
cache_idx = cps.size() + 1;
}
if (slices_cache[cache_idx] != kUnseen) {
piece_index[i] = slices_cache[cache_idx];
continue;
}
if (piece->shape().dimensions(dim) != max_size) {
PaddingConfig pc;
for (int64_t k = 0; k < piece_shape.rank(); ++k) {
auto pc_dim = pc.add_dimensions();
pc_dim->set_interior_padding(0);
pc_dim->set_edge_padding_low(0);
pc_dim->set_edge_padding_high(0);
if (k != dim) {
continue;
}
int64_t padding_size = max_size - piece->shape().dimensions(dim);
if (concat_pieces.empty()) {
pc_dim->set_edge_padding_low(padding_size);
} else {
pc_dim->set_edge_padding_high(padding_size);
}
}
piece = b->AddInstruction(
HloInstruction::CreatePad(piece_shape, piece, pad_value, pc));
}
piece_index[i] = unique_pieces.size();
unique_pieces.push_back(piece);
slices_cache[cache_idx] = piece_index[i];
}
HloInstruction* selector =
TableLookup<int32_t>(piece_index, S32, shard_ordinal, b);
int64_t init_piece = 0;
if (unique_pieces.size() > 1 && unique_pieces[init_piece] == padding) {
init_piece = 1;
}
HloInstruction* selected = unique_pieces[init_piece];
for (int64_t i = init_piece + 1; i < unique_pieces.size(); ++i) {
if (unique_pieces[i] == padding) {
continue;
}
HloInstruction* pred = b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), selector,
CreateR0WithType(S32, i, b), ComparisonDirection::kEq));
pred = b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(PRED, selected->shape().dimensions()), pred,
{}));
selected = b->AddInstruction(
HloInstruction::CreateTernary(selected->shape(), HloOpcode::kSelect,
pred, unique_pieces[i], selected));
}
concat_pieces.push_back(selected);
}
if (all_padding) {
concat_shape.set_dimensions(dim, max_window_size);
return b->AddInstruction(
HloInstruction::CreateBroadcast(concat_shape, pad_value, {}));
}
CHECK_GE(concat_shape.dimensions(dim), max_window_size);
HloInstruction* concat;
if (concat_pieces.size() == 1) {
concat = concat_pieces[0];
} else {
concat = b->AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, concat_pieces, dim));
}
std::vector<int32_t> slice_offset(halos.size(), 0);
std::vector<int32_t> non_padding_starts(halos.size(), 0);
std::vector<int32_t> non_padding_limits(halos.size(), 0);
const int64_t first_piece_size = concat_pieces[0]->shape().dimensions(dim);
int64_t padded_concat_size = concat_shape.dimensions(dim);
for (int64_t i = 0; i < halos.size(); ++i) {
if (halos[i].empty()) {
continue;
}
const Halo& halo = halos[i][0];
for (int64_t j = 0; j < halos[i].size(); ++j) {
if (halos[i][j].cp_idx != kPaddingShard) {
break;
}
non_padding_starts[i] += halos[i][j].limit - halos[i][j].start;
}
non_padding_limits[i] = left_halo_size_function.Calculate(i) +
right_halo_size_function.Calculate(i) +
input_shard_size;
int64_t high_padding = right_halo_size_function.Calculate(i) +
input_shard_size * (i + 1) -
base_shape.dimensions(dim);
if (high_padding > 0) {
non_padding_limits[i] -= high_padding;
}
if (halo.cp_idx >= 0) {
slice_offset[i] = halo.start - cps[halo.cp_idx].second +
first_piece_size -
cps[halo.cp_idx].first->shape().dimensions(dim);
} else if (halo.cp_idx == kSelfShard) {
slice_offset[i] = halo.start - self_piece_start;
} else {
slice_offset[i] = first_piece_size - (halo.limit - halo.start);
}
padded_concat_size =
std::max(padded_concat_size, slice_offset[i] + max_window_size);
}
if (padded_concat_size > concat_shape.dimensions(dim)) {
PaddingConfig pc;
for (int64_t k = 0; k < concat_shape.rank(); ++k) {
auto pc_dim = pc.add_dimensions();
pc_dim->set_interior_padding(0);
pc_dim->set_edge_padding_low(0);
pc_dim->set_edge_padding_high(0);
if (k != dim) {
continue;
}
pc_dim->set_edge_padding_high(padded_concat_size -
concat_shape.dimensions(dim));
}
concat_shape.set_dimensions(dim, padded_concat_size);
concat = b->AddInstruction(
HloInstruction::CreatePad(concat_shape, concat, pad_value, pc));
}
if (concat_shape.dimensions(dim) > max_window_size) {
Shape result_shape = concat_shape;
result_shape.set_dimensions(dim, max_window_size);
std::vector<HloInstruction*> offsets(result_shape.rank(),
CreateR0WithType(S32, 0, b));
offsets[dim] = TableLookup<int32_t>(slice_offset, S32, shard_ordinal, b);
concat = b->AddInstruction(HloInstruction::CreateDynamicSlice(
result_shape, concat, offsets, result_shape.dimensions()));
}
if (ignore_pad_vale) {
return concat;
}
HloInstruction* iota = b->AddInstruction(HloInstruction::CreateIota(
ShapeUtil::ChangeElementType(concat->shape(), S32), dim));
HloInstruction* valid_limit =
b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(concat->shape(), S32),
TableLookup<int32_t>(non_padding_limits, S32, shard_ordinal, b), {}));
HloInstruction* mask = b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(concat->shape(), PRED), iota, valid_limit,
ComparisonDirection::kLt));
if (absl::c_any_of(non_padding_starts,
[](const int32_t s) { return s > 0; })) {
HloInstruction* valid_start =
b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(concat->shape(), S32),
TableLookup<int32_t>(non_padding_starts, S32, shard_ordinal, b),
{}));
mask = b->AddInstruction(HloInstruction::CreateBinary(
mask->shape(), HloOpcode::kAnd, mask,
b->AddInstruction(HloInstruction::CreateCompare(
mask->shape(), iota, valid_start, ComparisonDirection::kGe))));
}
HloInstruction* padding = b->AddInstruction(
HloInstruction::CreateBroadcast(concat->shape(), pad_value, {}));
return b->AddInstruction(HloInstruction::CreateTernary(
concat->shape(), HloOpcode::kSelect, mask, concat, padding));
}
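// Overload that exchanges halos along every dimension of `hlo`, applying the
// per-dimension left/right halo size functions in turn; returns std::nullopt
// if the exchange fails for any dimension.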
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo,
std::vector<OffsetCalculation> left_halo_size_functions,
std::vector<OffsetCalculation> right_halo_size_functions,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
CHECK(left_halo_size_functions.size() == hlo->shape().rank());
CHECK(right_halo_size_functions.size() == hlo->shape().rank());
HloInstruction* visiting_hlo = hlo;
for (int dim = 0; dim < hlo->shape().rank(); ++dim) {
auto concat = ExchangeHalo(visiting_hlo, left_halo_size_functions[dim],
right_halo_size_functions[dim], dim, target,
collective_ops_creator, next_channel_id, b);
if (!concat) {
return std::nullopt;
}
visiting_hlo = *concat;
}
return visiting_hlo;
}
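// Exchanges halos along `dim`, then pads and slices the result to
// `shard_size_with_halo`, optionally masking out-of-range elements with
// `pad_value`. Defers to ExchangeHaloCompact when the required halo is much
// larger than a single shard.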
std::optional<HloInstruction*> ExchangeHaloAndGetValidData(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
int64_t explicit_left_padding_on_full_shape, int64_t padded_full_shape_size,
int64_t shard_size_with_halo, int64_t dim, const HloSharding& target,
HloInstruction* offset_on_padded_shape, HloInstruction* pad_value,
HloInstruction* partition_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b, bool mask_invalid_region,
bool force_mask_in_compact) {
int64_t shard_count = target.tile_assignment().dim(dim);
if (explicit_left_padding_on_full_shape ==
left_halo_size_function.Calculate(0)) {
int64_t max_halo =
std::max(left_halo_size_function.MaxInRange(0, shard_count),
right_halo_size_function.MaxInRange(0, shard_count));
int64_t max_shard_size =
hlo->shape().dimensions(dim) +
(left_halo_size_function + right_halo_size_function)
.MaxInRange(0, shard_count);
if (max_shard_size == shard_size_with_halo &&
max_halo > 2 * shard_size_with_halo) {
if (max_shard_size * 2 >= shard_count * hlo->shape().dimensions(dim)) {
return std::nullopt;
}
return ExchangeHaloCompact(
hlo, base_shape, left_halo_size_function, right_halo_size_function,
mask_invalid_region || force_mask_in_compact ? pad_value : nullptr,
dim, target, partition_ordinal, collective_ops_creator,
next_channel_id, b);
}
}
auto halo_exchange_result =
ExchangeHalo(hlo, left_halo_size_function, right_halo_size_function, dim,
target, collective_ops_creator, next_channel_id, b);
if (!halo_exchange_result) {
return std::nullopt;
}
auto concat = *halo_exchange_result;
int64_t max_left_halo_size =
left_halo_size_function.MaxInRange(1, shard_count);
int64_t max_left_halo_or_padding_size =
std::max(max_left_halo_size, explicit_left_padding_on_full_shape);
auto start_offset_on_padded_concat_calculation =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
0, max_left_halo_or_padding_size, 1)) -
left_halo_size_function;
int64_t extra_left_padding =
std::max(int64_t{0}, max_left_halo_or_padding_size -
std::max(int64_t{0}, max_left_halo_size));
int64_t extra_right_padding =
start_offset_on_padded_concat_calculation.MaxInRange(0, shard_count) +
shard_size_with_halo - concat->shape().dimensions(dim) -
extra_left_padding;
extra_right_padding = std::max(int64_t{0}, extra_right_padding);
if (extra_left_padding > 0 || extra_right_padding > 0) {
PaddingConfig padding_config;
auto padded_concat_shape = concat->shape();
for (int64_t i = 0; i < base_shape.rank(); ++i) {
auto padding_config_dim = padding_config.add_dimensions();
padding_config_dim->set_interior_padding(0);
padding_config_dim->set_edge_padding_low(0);
padding_config_dim->set_edge_padding_high(0);
if (i != dim) {
continue;
}
padding_config_dim->set_edge_padding_low(extra_left_padding);
padding_config_dim->set_edge_padding_high(extra_right_padding);
padded_concat_shape.set_dimensions(dim, concat->shape().dimensions(dim) +
extra_left_padding +
extra_right_padding);
}
concat = b->AddInstruction(HloInstruction::CreatePad(
padded_concat_shape, concat, pad_value, padding_config));
}
auto valid_slice = concat;
if (shard_size_with_halo != concat->shape().dimensions(dim)) {
CHECK_LT(shard_size_with_halo, concat->shape().dimensions(dim));
auto slice_shape = concat->shape();
slice_shape.set_dimensions(dim, shard_size_with_halo);
if (left_halo_size_function.IsConstant() &&
left_halo_size_function.Calculate(0) ==
explicit_left_padding_on_full_shape) {
std::vector<int64_t> start_indices(slice_shape.rank(), 0);
std::vector<int64_t> strides(slice_shape.rank(), 1);
valid_slice = b->AddInstruction(
HloInstruction::CreateSlice(slice_shape, concat, start_indices,
slice_shape.dimensions(), strides));
} else {
auto zero = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
std::vector<HloInstruction*> slice_offsets(base_shape.rank(), zero);
slice_offsets[dim] = start_offset_on_padded_concat_calculation.Calculate(
partition_ordinal, b);
valid_slice = b->AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape, concat, slice_offsets, slice_shape.dimensions()));
}
}
if (!mask_invalid_region) {
return valid_slice;
}
int64_t total_right_padding = padded_full_shape_size -
base_shape.dimensions(dim) -
explicit_left_padding_on_full_shape;
if (explicit_left_padding_on_full_shape > 0 || total_right_padding > 0) {
auto index_shape = ShapeUtil::ChangeElementType(valid_slice->shape(), S32);
auto iota = b->AddInstruction(HloInstruction::CreateIota(index_shape, dim));
auto broadcast_start_index_in_padded_shape =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape, offset_on_padded_shape, {}));
auto index_in_padded_shape = b->AddInstruction(
HloInstruction::CreateBinary(index_shape, HloOpcode::kAdd, iota,
broadcast_start_index_in_padded_shape));
auto mask_shape = ShapeUtil::ChangeElementType(index_shape, PRED);
std::vector<HloInstruction*> predicates;
if (explicit_left_padding_on_full_shape > 0) {
auto valid_index_start =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape,
b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
explicit_left_padding_on_full_shape))),
{}));
predicates.push_back(b->AddInstruction(HloInstruction::CreateCompare(
mask_shape, index_in_padded_shape, valid_index_start,
ComparisonDirection::kGe)));
}
if (total_right_padding > 0) {
auto valid_index_limit =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape,
b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
base_shape.dimensions(dim) +
explicit_left_padding_on_full_shape))),
{}));
predicates.push_back(b->AddInstruction(HloInstruction::CreateCompare(
mask_shape, index_in_padded_shape, valid_index_limit,
ComparisonDirection::kLt)));
}
CHECK(!predicates.empty());
auto is_valid =
predicates.size() == 2
? b->AddInstruction(HloInstruction::CreateBinary(
mask_shape, HloOpcode::kAnd, predicates[0], predicates[1]))
: predicates[0];
if (pad_value->shape().element_type() !=
valid_slice->shape().element_type()) {
pad_value = b->AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(valid_slice->shape().element_type(),
pad_value->shape().dimensions()),
pad_value));
}
auto masking_value = b->AddInstruction(
HloInstruction::CreateBroadcast(valid_slice->shape(), pad_value, {}));
valid_slice = b->AddInstruction(
HloInstruction::CreateTernary(valid_slice->shape(), HloOpcode::kSelect,
is_valid, valid_slice, masking_value));
}
return valid_slice;
}
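// Reshards `original` as a windowed input with low padding on `dims` so that
// the padding introduced by uneven tiling ends up on the left side of each
// shard; returns nullptr if the windowed reshard is not possible.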
HloInstruction* HaloExchangeToPadOnLeft(PartitionedHlo& original,
absl::Span<const int64_t> dims) {
if (original.sharding().IsTileMaximal()) {
return original.hlo();
}
Window window;
for (int64_t i = 0; i < original.base_shape().rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
int64_t low_padding = 0;
if (absl::c_linear_search(dims, i)) {
low_padding = RoundUpTo(original.base_shape().dimensions(i),
original.sharding().tile_assignment().dim(i)) -
original.base_shape().dimensions(i);
}
dim->set_padding_low(low_padding);
dim->set_padding_high(0);
dim->set_base_dilation(1);
}
auto reshard_window = original.ReshardAsWindowedInput(
window, original.sharding(),
CreateZero(ShapeUtil::MakeShape(original.base_shape().element_type(), {}),
original.state().b),
false);
if (!reshard_window.has_value()) {
return nullptr;
}
CHECK(!reshard_window->dynamic_slice_index_on_output.has_value());
return reshard_window->sharded_input;
}
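// Returns true if `comp` matches the NaN-safe descending comparator pattern
// (F32/BF16 keys bitcast to a sign-adjusted integer total order), as
// typically emitted for TopK-style sorts.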
bool IsNanSafeGt(HloComputation* comp) {
namespace m = match;
auto match_bitcast_f32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16 = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
if (comp->root_instruction()->opcode() == HloOpcode::kSelect) {
return Match(comp->root_instruction()->operand(2),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction()->operand(2),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1)));
}
return Match(comp->root_instruction(),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1)));
}
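// Recognizes a (values, iota) sort whose users slice the first k elements
// along the sort dimension, i.e. a TopK pattern. Returns k only when the
// operand is tiled solely along the sort dimension and k is smaller than the
// per-partition size, so the sort can be partitioned on that dimension.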
std::optional<int64_t> GetKValueInTopKWhenPartitionSortDim(
HloInstruction* hlo) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(hlo);
if (sort == nullptr || sort->operand_count() != 2) {
return std::nullopt;
}
if (!IsNanSafeGt(sort->to_apply())) {
return std::nullopt;
}
HloInstruction* data = sort->mutable_operand(0);
HloIotaInstruction* iota =
DynCast<HloIotaInstruction>(sort->mutable_operand(1));
const PrimitiveType element_type = data->shape().element_type();
if (iota == nullptr || iota->shape().element_type() != S32 ||
iota->opcode() != HloOpcode::kIota ||
iota->iota_dimension() != sort->sort_dimension()) {
return std::nullopt;
}
const int64_t sort_dim = sort->sort_dimension();
if (element_type != F32 && element_type != BF16 && element_type != S32 &&
element_type != U32) {
return std::nullopt;
}
bool supported = true;
std::optional<int64_t> k;
for (HloInstruction* gte : sort->users()) {
if (gte->opcode() != HloOpcode::kGetTupleElement) {
supported = false;
break;
}
const HloInstruction* slice = gte->users()[0];
if (slice->opcode() != HloOpcode::kSlice) {
supported = false;
break;
}
if (absl::c_any_of(slice->slice_starts(), [](int x) { return x != 0; }) ||
absl::c_any_of(slice->slice_strides(), [](int x) { return x != 1; })) {
supported = false;
break;
}
for (int64_t dim = 0; dim < data->shape().dimensions_size(); dim++) {
if (dim == sort_dim) {
continue;
}
if (slice->slice_limits(dim) !=
slice->operand(0)->shape().dimensions(dim)) {
supported = false;
break;
}
}
if (!k.has_value()) {
k = slice->slice_limits(sort_dim);
} else if (k != slice->slice_limits(sort_dim)) {
supported = false;
break;
}
}
if (k == std::nullopt || !supported) {
return std::nullopt;
}
if (!data->has_sharding()) {
return std::nullopt;
}
const HloSharding& sharding = sort->operand(0)->sharding();
if (sharding.IsTileMaximal()) {
return std::nullopt;
}
for (int64_t dim = 0; dim < sort->shape().tuple_shapes(0).dimensions_size();
++dim) {
if (sharding.tile_assignment().dim(dim) > 1) {
if (dim != sort_dim) {
return std::nullopt;
}
}
}
const int64_t shard_count = sharding.tile_assignment().dim(sort_dim);
if (shard_count <= 1) {
return std::nullopt;
}
const int64_t input_size = hlo->operand(0)->shape().dimensions(sort_dim);
const int64_t per_partition_size = CeilOfRatio(input_size, shard_count);
if (k.value() >= per_partition_size) {
return std::nullopt;
}
return k;
}
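// Slices the first `k` elements of `hlo` along `slice_dim`.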
HloInstruction* SliceFirstK(HloInstruction* hlo, SpmdBuilder* builder,
int64_t slice_dim, int64_t k) {
const Shape& hlo_shape = hlo->shape();
auto hlo_dims = hlo_shape.dimensions();
std::vector<int64_t> start_indices(hlo_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(hlo_dims.begin(), hlo_dims.end());
std::vector<int64_t> strides(hlo_shape.dimensions_size(), 1);
limit_indices[slice_dim] = k;
auto output_shape = hlo_shape;
output_shape.set_dimensions(slice_dim, k);
return builder->AddInstruction(HloInstruction::CreateSlice(
output_shape, hlo, start_indices, limit_indices, strides));
}
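// Returns the number of shards along `dim`; tile-maximal shardings and
// dim == -1 count as a single shard.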
int64_t ShardCountAtDim(const HloSharding& sharding, int64_t dim) {
if (sharding.IsTileMaximal()) {
return 1;
}
if (dim == -1) {
return 1;
}
return sharding.tile_assignment().dim(dim);
}
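// Finds (source_dim, target_dim) pairs along which an all-to-all can swap
// tile counts to reshard from `source` to `target`; returns std::nullopt if
// the two shardings cannot be related this way.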
std::optional<std::vector<std::pair<int64_t, int64_t>>>
GetReshardAllToAllSourceTargetDims(const HloSharding& source,
const HloSharding& target) {
if (source.IsTileMaximal() || target.IsTileMaximal() ||
source.tile_assignment().num_dimensions() !=
target.tile_assignment().num_dimensions() ||
source.NumTiles() != target.NumTiles()) {
return std::nullopt;
}
std::map<int64_t, std::vector<int64_t>> source_size_to_dim;
std::map<int64_t, std::vector<int64_t>> target_size_to_dim;
for (int64_t i = 0; i < source.tile_assignment().num_dimensions(); ++i) {
if (source.tile_assignment().dim(i) == target.tile_assignment().dim(i)) {
continue;
}
source_size_to_dim[source.tile_assignment().dim(i)].push_back(i);
target_size_to_dim[target.tile_assignment().dim(i)].push_back(i);
}
if (source_size_to_dim.empty() ||
source_size_to_dim.size() != target_size_to_dim.size()) {
return std::nullopt;
}
for (const auto& entry : source_size_to_dim) {
auto target_it = target_size_to_dim.find(entry.first);
if (target_it == target_size_to_dim.end() ||
target_it->second.size() != entry.second.size()) {
return std::nullopt;
}
}
std::vector<std::pair<int64_t, int64_t>> result;
auto remove_entry = [](int64_t size, int64_t dim,
std::map<int64_t, std::vector<int64_t>>& size_to_dim) {
size_to_dim[size].erase(
std::remove_if(size_to_dim[size].begin(), size_to_dim[size].end(),
[dim](int64_t a) { return a == dim; }),
size_to_dim[size].end());
if (size_to_dim[size].empty()) {
size_to_dim.erase(size);
}
};
while (!source_size_to_dim.empty()) {
int64_t source_size = source_size_to_dim.begin()->first;
int64_t i = source_size_to_dim.begin()->second.back();
int64_t target_i_size = target.tile_assignment().dim(i);
if (target_i_size == source_size) {
remove_entry(source_size, i, source_size_to_dim);
remove_entry(source_size, i, target_size_to_dim);
continue;
}
auto j_it = source_size_to_dim[target_i_size].begin();
int64_t j = *j_it;
if (source_size == 1) {
while (target.tile_assignment().dim(j) == 1) {
if (++j_it == source_size_to_dim[target_i_size].end()) {
break;
}
j = *j_it;
}
} else if (target_i_size % source_size == 0) {
while (target.tile_assignment().dim(j) != source_size) {
if (++j_it == source_size_to_dim[target_i_size].end()) {
break;
}
j = *j_it;
}
} else {
return std::nullopt;
}
result.emplace_back(j, i);
remove_entry(target_i_size, i, target_size_to_dim);
source_size_to_dim.begin()->second.back() = j;
remove_entry(target_i_size, j, source_size_to_dim);
}
return result;
}
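// True when source and target only differ in which device owns each tile, so
// a collective-permute is sufficient for the reshard.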
bool CanReshardWithCollectivePermute(const HloSharding& source,
const HloSharding& target) {
return !source.IsTileMaximal() && !target.IsTileMaximal() &&
source.tile_assignment().dimensions() ==
target.tile_assignment().dimensions() &&
source.ReplicateOnLastTileDim() == target.ReplicateOnLastTileDim() &&
source.tile_assignment() != target.tile_assignment();
}
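// Re-expresses `grouped_sharding` on the device groups of `reference`. Every
// source group must map onto a single reference group (respecting group order
// unless `ignore_group_order`), and device ids inside the group sharding are
// permuted to match. Returns std::nullopt if `requires_compatibility` and the
// groups cannot be aligned.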
std::optional<GroupedSharding> AlignGroupsWithInternal(
GroupedSharding grouped_sharding, const GroupedSharding& reference,
bool requires_compatibility, bool ignore_group_order) {
auto get_permutation = [](absl::Span<const int64_t> src,
absl::Span<const int64_t> dst) {
CHECK_EQ(src.size(), dst.size());
absl::flat_hash_map<int64_t, int64_t> dst_reverse_map(dst.size());
for (int64_t i = 0; i < dst.size(); ++i) {
dst_reverse_map[dst[i]] = i;
}
std::vector<int64_t> permutation(src.size());
for (int64_t i = 0; i < src.size(); ++i) {
auto it = dst_reverse_map.find(src[i]);
CHECK(it != dst_reverse_map.end());
permutation[i] = it->second;
}
return permutation;
};
CHECK_EQ(grouped_sharding.device_groups.size(),
reference.device_groups.size());
std::vector<int64_t> device_to_ref_group(reference.device_groups.size() *
reference.device_groups[0].size());
for (int64_t g = 0; g < reference.device_groups.size(); ++g) {
for (int64_t device : reference.device_groups[g]) {
device_to_ref_group[device] = g;
}
}
auto unique_ref_dev_group =
[&](absl::Span<const int64_t> devices) -> int64_t {
int64_t ref_g = -1;
for (int64_t device : devices) {
if (ref_g == -1) {
ref_g = device_to_ref_group[device];
} else if (ref_g != device_to_ref_group[device]) {
return -1;
}
}
return ref_g;
};
bool matching_groups = true;
std::vector<int64_t> original_src_to_ref_permutation;
for (int64_t g = 0; g < grouped_sharding.device_groups.size(); ++g) {
int64_t ref_g = unique_ref_dev_group(grouped_sharding.device_groups[g]);
if (ref_g < 0 || (!ignore_group_order && g != ref_g)) {
if (requires_compatibility) {
return std::nullopt;
}
matching_groups = false;
break;
}
if (g == 0) {
original_src_to_ref_permutation = get_permutation(
grouped_sharding.device_groups[g], reference.device_groups[ref_g]);
} else if (requires_compatibility) {
if (original_src_to_ref_permutation !=
get_permutation(grouped_sharding.device_groups[g],
reference.device_groups[ref_g])) {
return std::nullopt;
}
}
}
if (matching_groups && !grouped_sharding.sharding.IsTileMaximal()) {
auto tiles = [&] {
auto array =
grouped_sharding.sharding.tile_assignment().shared_array_clone();
array->Each([&](absl::Span<const int64_t> indices, int64_t* device) {
*device = original_src_to_ref_permutation[*device];
});
return TileAssignment(std::move(array));
}();
grouped_sharding.sharding =
grouped_sharding.sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tiles)
: HloSharding::Tile(tiles);
}
grouped_sharding.device_groups = reference.device_groups;
return grouped_sharding;
}
GroupedSharding AlignGroupsWith(GroupedSharding grouped_sharding,
const GroupedSharding& reference,
bool ignore_group_order) {
return *AlignGroupsWithInternal(std::move(grouped_sharding), reference,
                                  /*requires_compatibility=*/false,
ignore_group_order);
}
std::optional<GroupedSharding> AlignGroupsWithIfCompatible(
GroupedSharding grouped_sharding, const GroupedSharding& reference) {
return AlignGroupsWithInternal(std::move(grouped_sharding), reference,
                                 /*requires_compatibility=*/true,
                                 /*ignore_group_order=*/false);
}
HloSharding AlignShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> sharding_dims,
const HloSharding& reference,
absl::Span<const int64_t> reference_dims) {
auto sharding_grouped =
hlo_sharding_util::GroupShardingOnDims(sharding, sharding_dims);
auto reference_grouped =
hlo_sharding_util::GroupShardingOnDims(reference, reference_dims);
return hlo_sharding_util::UngroupSharding(
AlignGroupsWith(sharding_grouped, reference_grouped));
}
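// Shrinks the base shape to a single group's share by dividing each grouped
// data dimension by its group size (rounding up).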
Shape GetPerGroupBaseShape(const GroupedSharding& grouped_sharding,
const Shape& original_base_shape) {
auto result = original_base_shape;
for (int64_t i = 0; i < grouped_sharding.group_dims.size(); ++i) {
int64_t dim = grouped_sharding.group_dims[i];
if (dim >= original_base_shape.rank()) {
continue;
}
int64_t groups = grouped_sharding.group_dim_sizes[i];
result.set_dimensions(dim, CeilOfRatio(result.dimensions(dim), groups));
}
return result;
}
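// Builds a PartitioningState whose collective-ops creator, partition id and
// reshard cache are all scoped to the given device groups.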
PartitionedHlo::PartitioningState CreatePerGroupPartitioningState(
const PartitionedHlo::PartitioningState& state,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b) {
auto result = state;
result.collective_ops_creator = GetPerGroupCollectiveOpsCreator(
state.collective_ops_creator, device_groups);
result.partition_id =
GetInGroupPartitionId(state.partition_id, device_groups, b);
std::vector<std::string> per_group_strings(device_groups.size());
for (int64_t i = 0; i < per_group_strings.size(); ++i) {
per_group_strings[i] = absl::StrJoin(device_groups[i], ",");
}
auto& grouped_cache =
state.reshard_cache->groupd_caches[absl::StrJoin(per_group_strings, ";")];
if (!grouped_cache) {
grouped_cache = std::make_unique<PartitionedHlo::ReshardCache>();
}
result.reshard_cache = grouped_cache.get();
return result;
}
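// Extracts the per-group shard of a replicated value: builds a group-level
// tile assignment over `group_dims`, pads the base shape if needed, and
// dynamic-slices at the offsets of the calling partition's group.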
HloInstruction* PerGroupSliceFromReplicated(
HloInstruction* replicated, HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups,
absl::Span<const int64_t> group_dims,
absl::Span<const int64_t> group_dim_sizes, SpmdBuilder* b) {
std::vector<uint32_t> group_ids(device_groups.size() *
device_groups[0].size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t device : device_groups[g]) {
group_ids[device] = g;
}
}
auto group_id = TableLookup<uint32_t>(group_ids, U32, partition_id, b);
std::vector<int64_t> group_level_tile_dims(replicated->shape().rank(), 1);
for (int64_t i = 0; i < group_dims.size(); ++i) {
group_level_tile_dims[group_dims[i]] = group_dim_sizes[i];
}
auto group_level_tile = [&] {
absl::InlinedVector<int, 6> perm_dims(group_dims.begin(), group_dims.end());
absl::c_sort(perm_dims);
absl::InlinedVector<int, 6> perm_dim_map(group_level_tile_dims.size(), -1);
for (int i = 0; i < perm_dims.size(); ++i) {
perm_dim_map[perm_dims[i]] = i;
}
absl::InlinedVector<int, 6> transpose_perm(group_dims.size());
for (int i = 0; i < group_dims.size(); ++i) {
transpose_perm[i] = perm_dim_map[group_dims[i]];
CHECK_NE(transpose_perm[i], -1);
}
return TileAssignment(group_level_tile_dims, group_dim_sizes,
transpose_perm);
}();
auto group_level_sharding = HloSharding::Tile(std::move(group_level_tile));
auto padded_hlo = PadBaseShapeBeforeUnevenTiledSharding(
replicated, group_level_sharding, b);
auto shard_shape =
MakePartitionedShape(replicated->shape(), group_level_sharding);
return b->AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, padded_hlo,
MakePartitionOffsets(replicated->shape(), group_level_sharding, group_id,
b),
shard_shape.dimensions()));
}
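// Returns the tile dimensions on which all devices of each group share the
// same index, i.e. dimensions compatible with the given grouping; returns
// std::nullopt if the grouping does not line up with the sharding.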
std::optional<std::vector<int64_t>> FindMatchingPartitionedDimsForGrouping(
const HloSharding& sharding,
const std::vector<std::vector<int64_t>>& device_groups) {
if (sharding.IsTileMaximal() || device_groups.size() < 2) {
return std::nullopt;
}
const int64_t num_devices = sharding.tile_assignment().num_elements();
if (num_devices != device_groups.size() * device_groups[0].size()) {
return std::nullopt;
}
std::vector<int64_t> dims;
if (device_groups[0].size() < 2) {
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (sharding.tile_assignment().dim(i) > 1) {
dims.push_back(i);
}
}
return dims;
}
std::vector<std::vector<int64_t>> device_to_index(
num_devices,
std::vector<int64_t>(sharding.tile_assignment().num_dimensions()));
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> index, int64_t device) {
device_to_index[device].assign(index.begin(), index.end());
});
int64_t group_count = 1;
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (device_to_index[device_groups[0][0]][i] ==
device_to_index[device_groups[0][1]][i]) {
dims.push_back(i);
group_count *= sharding.tile_assignment().dim(i);
}
}
if (group_count != device_groups.size()) {
return std::nullopt;
}
for (const auto& group : device_groups) {
for (int64_t i = 1; i < group.size(); ++i) {
if (absl::c_any_of(dims, [&](const int64_t dim) {
return device_to_index[group[i]][dim] !=
device_to_index[group[0]][dim];
})) {
return std::nullopt;
}
}
}
return dims;
}
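// Creates a sharding for `target_shape` that matches `source_sharding` on the
// paired (target_dims, source_dims) and partially replicates across any
// remaining devices.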
HloSharding CreateMatchingShardingOnDims(
const Shape& target_shape, const HloSharding& source_sharding,
absl::Span<const int64_t> target_dims,
absl::Span<const int64_t> source_dims) {
CHECK(target_dims.size() == source_dims.size())
<< "Expected 1:1 match between parallel dimensions";
if (source_sharding.IsReplicated()) {
return HloSharding::Replicate();
}
absl::InlinedVector<int64_t, 4> tile_dims(target_shape.dimensions_size(), 1);
int num_tiles = 1;
for (int i = 0, end = target_dims.size(); i < end; ++i) {
num_tiles *= source_sharding.tile_assignment().dim(source_dims[i]);
tile_dims[target_dims[i]] =
source_sharding.tile_assignment().dim(source_dims[i]);
}
bool to_be_partially_replicated = false;
if (num_tiles != source_sharding.tile_assignment().num_elements()) {
CHECK_EQ(source_sharding.tile_assignment().num_elements() % num_tiles, 0);
to_be_partially_replicated = true;
tile_dims.push_back(source_sharding.tile_assignment().num_elements() /
num_tiles);
}
auto tgt_tile_assignment =
source_sharding.tile_assignment().Reshape(tile_dims);
if (to_be_partially_replicated) {
return AlignShardingOnDims(HloSharding::PartialTile(tgt_tile_assignment),
target_dims, source_sharding, source_dims);
} else {
return AlignShardingOnDims(HloSharding::Tile(tgt_tile_assignment),
target_dims, source_sharding, source_dims);
}
}
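// Computes compatible shardings for a gather/scatter operand and its indices
// across their parallel dimensions, adjusting whichever side has fewer tiles;
// returns std::nullopt if the two sides cannot be made to match.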
std::optional<GatherScatterParallelDimSharding>
GatherScatterOperandsShardedAcrossParallelDims(
const HloInstruction& operand, const HloInstruction& indices,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims) {
const auto& indices_parallel_dims = parallel_dims.indices_parallel_dims;
const auto& operand_parallel_dims = parallel_dims.operand_parallel_dims;
if (indices_parallel_dims.size() != operand_parallel_dims.size()) {
return std::nullopt;
}
auto new_index_shard = indices.sharding();
auto new_operand_shard = operand.sharding();
int idx_parallel_tiles_num = new_index_shard.NumTiles(indices_parallel_dims);
int op_parallel_tiles_num = new_operand_shard.NumTiles(operand_parallel_dims);
if (idx_parallel_tiles_num == 1 && op_parallel_tiles_num == 1) {
return std::nullopt;
}
if (new_index_shard.IsReplicated()) {
return GatherScatterParallelDimSharding{
CreateMatchingShardingOnDims(indices.shape(), new_operand_shard,
indices_parallel_dims,
operand_parallel_dims),
new_operand_shard};
}
if (new_operand_shard.IsReplicated()) {
return GatherScatterParallelDimSharding{
new_index_shard, CreateMatchingShardingOnDims(
operand.shape(), new_index_shard,
operand_parallel_dims, indices_parallel_dims)};
}
if (idx_parallel_tiles_num != op_parallel_tiles_num) {
auto to_adjust_dims = operand_parallel_dims;
auto target_dims = indices_parallel_dims;
HloSharding* target = &new_index_shard;
HloSharding* to_adjust = &new_operand_shard;
if (idx_parallel_tiles_num < op_parallel_tiles_num) {
std::swap(to_adjust_dims, target_dims);
std::swap(to_adjust, target);
}
if (!to_adjust->ReplicateOnLastTileDim()) {
return std::nullopt;
}
std::vector<int64_t> new_tile_assignment_dims(
to_adjust->tile_assignment().dimensions().begin(),
to_adjust->tile_assignment().dimensions().end());
for (int i = 0; i < to_adjust_dims.size(); ++i) {
int64_t target_dim = target->tile_assignment().dim(target_dims[i]);
int64_t to_adjust_dim =
to_adjust->tile_assignment().dim(to_adjust_dims[i]);
if (target_dim < to_adjust_dim) {
return std::nullopt;
}
if (target_dim == to_adjust_dim) {
continue;
}
int64_t ratio = target_dim / to_adjust_dim;
if (target_dim % to_adjust_dim != 0 ||
new_tile_assignment_dims.back() % ratio != 0) {
return std::nullopt;
}
new_tile_assignment_dims[to_adjust_dims[i]] *= ratio;
new_tile_assignment_dims.back() /= ratio;
}
CHECK_GE(new_tile_assignment_dims.back(), 1);
bool to_partially_replicate = true;
if (new_tile_assignment_dims.back() == 1) {
new_tile_assignment_dims.pop_back();
to_partially_replicate = false;
}
auto new_tile_assignment =
to_adjust->tile_assignment().Reshape(new_tile_assignment_dims);
if (to_partially_replicate) {
*to_adjust =
AlignShardingOnDims(HloSharding::PartialTile(new_tile_assignment),
to_adjust_dims, *target, target_dims);
} else {
*to_adjust = AlignShardingOnDims(HloSharding::Tile(new_tile_assignment),
to_adjust_dims, *target, target_dims);
}
}
std::vector<int64_t> operand_shard_tile_dims(
new_operand_shard.tile_assignment().dimensions().begin(),
new_operand_shard.tile_assignment().dimensions().end());
for (int i = 0; i < indices_parallel_dims.size(); ++i) {
operand_shard_tile_dims[operand_parallel_dims[i]] =
new_index_shard.tile_assignment().dim(indices_parallel_dims[i]);
}
auto operand_shard_tiles =
new_operand_shard.tile_assignment().Reshape(operand_shard_tile_dims);
new_operand_shard = AlignShardingOnDims(
new_operand_shard.ReplicateOnLastTileDim()
? HloSharding::PartialTile(operand_shard_tiles)
: HloSharding::Tile(operand_shard_tiles),
operand_parallel_dims, new_index_shard, indices_parallel_dims);
return GatherScatterParallelDimSharding{new_index_shard, new_operand_shard};
}
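// Matches concat(slice(x), slice(x)) where the second slice ends where the
// first one starts, i.e. a rotate-right of `x` along the concat dimension;
// returns the rotation amount (the size of the first slice), or -1 if no
// match.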
int64_t FindRotateRightPattern(const HloInstruction* concat,
const HloInstruction* lhs,
const HloInstruction* rhs) {
if (lhs->opcode() != HloOpcode::kSlice ||
rhs->opcode() != HloOpcode::kSlice ||
lhs->operand(0) != rhs->operand(0)) {
return -1;
}
const HloInstruction* to_rotate = lhs->operand(0);
if (!ShapeUtil::Compatible(to_rotate->shape(), concat->shape()) ||
concat->sharding() != to_rotate->sharding()) {
return -1;
}
const int64_t dim = concat->concatenate_dimension();
if (lhs->slice_strides(dim) != 1 || rhs->slice_strides(dim) != 1 ||
lhs->slice_starts(dim) != rhs->slice_limits(dim)) {
return -1;
}
return lhs->shape().dimensions(dim);
}
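// Matches concat(slice(mid), mid, slice(mid)), possibly through unary
// elementwise ops on the outer slices, i.e. a pad-with-wrap pattern; records
// the slice starts and the skipped elementwise modifiers.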
std::optional<PadWithWrapPattern> FindPadWithWrapPattern(
const HloInstruction* concat, const HloInstruction* lhs,
const HloInstruction* mid, const HloInstruction* rhs) {
if (!lhs || !mid || !rhs) {
return std::nullopt;
}
auto skip_elementwise_ops = [&](const HloInstruction* inst) {
std::vector<const HloInstruction*> modifiers;
while (inst->IsElementwise() && inst->operand_count() == 1 &&
inst->user_count() == 1) {
if (inst->opcode() != HloOpcode::kCopy) {
modifiers.push_back(inst);
}
inst = inst->operand(0);
}
return std::make_pair(modifiers, inst);
};
PadWithWrapPattern pad_pattern;
auto skip_result = skip_elementwise_ops(lhs);
pad_pattern.lhs_modifiers = std::move(skip_result.first);
lhs = skip_result.second;
skip_result = skip_elementwise_ops(rhs);
pad_pattern.rhs_modifiers = std::move(skip_result.first);
rhs = skip_result.second;
const int64_t dim = concat->concatenate_dimension();
if (lhs->opcode() != HloOpcode::kSlice ||
rhs->opcode() != HloOpcode::kSlice || lhs->operand(0) != mid ||
rhs->operand(0) != mid || lhs->slice_strides(dim) != 1 ||
rhs->slice_strides(dim) != 1 || lhs->sharding() != mid->sharding() ||
rhs->sharding() != mid->sharding() ||
lhs->sharding() != concat->sharding()) {
return std::nullopt;
}
pad_pattern.lhs_slice_start = lhs->slice_starts(dim);
pad_pattern.rhs_slice_start = rhs->slice_starts(dim);
return pad_pattern;
}
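// Reshards `to_reshard` as a windowed input whose window encodes the slice's
// strides, starts and limits as padding, so the slice can later be applied
// per shard.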
std::optional<PartitionedHlo::WindowedInputShardReturnValue>
ReshardDataForSlicing(absl::Span<const int64_t> strides,
absl::Span<const int64_t> starts,
absl::Span<const int64_t> limits,
PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b) {
Window window;
for (int64_t i = 0; i < starts.size(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(strides[i]);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_padding_low(-starts[i]);
dim->set_padding_high(limits[i] - to_reshard.base_shape().dimensions(i));
dim->set_base_dilation(1);
}
return to_reshard.ReshardAsWindowedInput(
window, target_sharding,
CreateZero(
ShapeUtil::MakeShape(to_reshard.hlo()->shape().element_type(), {}),
b),
false);
}
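// Applies the per-shard slice described by a windowed reshard produced by
// ReshardDataForSlicing, emitting an HLO slice only when one is needed.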
HloInstruction* SliceDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
absl::Span<const int64_t> strides, const Shape& base_shape,
const HloSharding& target_sharding, SpmdBuilder* b) {
std::vector<int64_t> start_indices(strides.size());
std::vector<int64_t> limit_indices(strides.size());
bool need_slice = false;
for (int64_t i = 0; i < strides.size(); ++i) {
auto dim = reshard_operand.shard_window.dimensions(i);
start_indices[i] = -dim.padding_low();
limit_indices[i] = reshard_operand.sharded_input->shape().dimensions(i) +
dim.padding_high();
if (start_indices[i] != 0 || strides[i] != 1 ||
limit_indices[i] !=
reshard_operand.sharded_input->shape().dimensions(i)) {
need_slice = true;
}
}
if (need_slice) {
auto shard_shape = MakePartitionedShape(base_shape, target_sharding);
return b->AddInstruction(
HloInstruction::CreateSlice(shard_shape, reshard_operand.sharded_input,
start_indices, limit_indices, strides));
}
return reshard_operand.sharded_input;
}
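// Reshards `to_reshard` as a windowed input whose window encodes the pad
// config; masking is only requested when a shard could observe padding that
// is not already the correct value.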
std::optional<PartitionedHlo::WindowedInputShardReturnValue> ReshardDataForPad(
HloInstruction* pad_value, PaddingConfig pc, PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b) {
Window window;
bool needs_masking = false;
const bool pad_value_is_zero =
pad_value->IsConstant() && pad_value->literal().IsZero({});
for (int64_t i = 0; i < to_reshard.hlo()->shape().rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
auto pd = pc.dimensions(i);
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_padding_low(pd.edge_padding_low());
dim->set_padding_high(pd.edge_padding_high());
dim->set_base_dilation(pd.interior_padding() + 1);
const int64_t shard_count = target_sharding.tile_assignment().dim(i);
needs_masking |= shard_count > 1 &&
(pd.edge_padding_low() > 0 || pd.edge_padding_high() > 0 ||
pd.interior_padding() > 0) &&
(!pad_value_is_zero ||
to_reshard.base_shape().dimensions(i) % shard_count != 0);
}
return to_reshard.ReshardAsWindowedInput(
window, target_sharding, pad_value,
needs_masking, true);
}
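// Applies the per-shard pad described by a windowed reshard produced by
// ReshardDataForPad, emitting an HLO pad only when one is needed.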
HloInstruction* PadDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
HloInstruction* pad_value, SpmdBuilder* b) {
PaddingConfig sharded_padding_config;
bool need_pad = false;
for (int64_t i = 0; i < reshard_operand.sharded_input->shape().rank(); ++i) {
auto dim = sharded_padding_config.add_dimensions();
const auto& wd = reshard_operand.shard_window.dimensions(i);
dim->set_edge_padding_low(wd.padding_low());
dim->set_edge_padding_high(wd.padding_high());
dim->set_interior_padding(wd.base_dilation() - 1);
if (wd.padding_low() != 0 || wd.padding_high() != 0 ||
wd.base_dilation() != 1) {
need_pad = true;
}
}
auto sharded_data = reshard_operand.sharded_input;
if (need_pad) {
auto sharded_data_shape =
ShapeInference::InferPadShape(sharded_data->shape(), pad_value->shape(),
sharded_padding_config)
.value();
return b->AddInstruction(HloInstruction::CreatePad(
sharded_data_shape, sharded_data, pad_value, sharded_padding_config));
}
return sharded_data;
}
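// Groups partitions whose tile indices agree on all dimensions except
// `replication_dims`.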
std::vector<std::vector<int64_t>> GetPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims) {
int64_t group_size = 1;
for (int64_t i : replication_dims) {
group_size *= sharding.tile_assignment().dim(i);
}
std::vector<std::vector<int64_t>> partition_groups(
sharding.tile_assignment().num_elements() / group_size);
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t partition) {
int64_t group_id = 0;
for (int64_t i = 0; i < indices.size(); ++i) {
if (!absl::c_linear_search(replication_dims, i)) {
group_id *= sharding.tile_assignment().dim(i);
group_id += indices[i];
}
}
partition_groups[group_id].push_back(partition);
});
return partition_groups;
}
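// Same grouping as GetPartitionGroupsForReplication, but expressed as an
// IotaReplicaGroupList; only possible when the tile assignment is iota-based
// and covers exactly `num_partitions` devices.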
std::optional<IotaReplicaGroupList> GetIotaPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims,
int64_t num_partitions) {
if (!sharding.tile_assignment().iota().has_value()) {
return std::nullopt;
}
if (sharding.tile_assignment().num_elements() != num_partitions) {
return std::nullopt;
}
int64_t group_size = 1;
for (int64_t i : replication_dims) {
group_size *= sharding.tile_assignment().dim(i);
}
int64_t num_replica_groups =
sharding.tile_assignment().num_elements() / group_size;
std::vector<int> transpose_dims(sharding.tile_assignment().num_dimensions());
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
std::vector<int> replication_dims_sorted(replication_dims.begin(),
replication_dims.end());
std::sort(replication_dims_sorted.begin(), replication_dims_sorted.end());
for (int64_t i : replication_dims_sorted) {
auto it = std::find(transpose_dims.begin(), transpose_dims.end(), i);
if (it != transpose_dims.end()) {
transpose_dims.erase(it);
transpose_dims.push_back(i);
}
}
auto transpose_iota_tile_assignment =
sharding.tile_assignment().iota()->Transpose(transpose_dims);
if (!transpose_iota_tile_assignment.has_value()) {
return std::nullopt;
}
return IotaReplicaGroupList(num_replica_groups, group_size,
transpose_iota_tile_assignment->reshape_dims(),
transpose_iota_tile_assignment->transpose_perm());
}
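// Expands a partition-level group list into a cross-replica
// CollectiveDeviceList by prepending the replica dimension to the iota
// reshape/transpose description.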
CollectiveDeviceList ExpandPartitionGroupListAcrossReplicas(
IotaReplicaGroupList partition_group_list, int num_replicas,
int num_partitions) {
int partition_group_count = partition_group_list.num_replica_groups();
int partition_group_size = partition_group_list.num_devices_per_group();
CHECK_EQ((partition_group_count * partition_group_size), num_partitions);
int replica_group_count = partition_group_count * num_replicas;
std::vector<int64_t> new_reshape_dims(
partition_group_list.reshape_dims().begin(),
partition_group_list.reshape_dims().end());
new_reshape_dims.insert(new_reshape_dims.begin(), num_replicas);
std::vector<int> new_transpose_dims;
new_transpose_dims.push_back(0);
for (int64_t dim : partition_group_list.transpose_perm()) {
new_transpose_dims.push_back(dim + 1);
}
return CollectiveDeviceList(
IotaReplicaGroupList(replica_group_count, partition_group_size,
new_reshape_dims, new_transpose_dims));
}
}
} | #include "xla/service/spmd/spmd_partitioner_util.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
namespace xla {
namespace spmd {
namespace {
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding1) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}));
const std::vector<HloSharding> target_shardings = {
HloSharding::IotaTile({2, 2}),
HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0})};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[1]);
}
partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}, {2, 2}, {1, 0}));
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[0]);
}
}
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding2) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 8}));
const std::vector<HloSharding> target_shardings = {
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 3, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 4, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 2, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 4, 2})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 2, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 3, 2}))};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_sharding);
}
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {1});
std::vector<std::vector<int64_t>> expected_partition_groups = {
{0, 2}, {1, 3}, {4, 6}, {5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {0, 2});
std::vector<std::vector<int64_t>> expected_partition_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {1}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 4);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 2);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(2, 2, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {0, 2}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 2);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 4);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(4, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(1, 0));
}
TEST(SPMDPartitionerUtilTest,
GetIotaPartitionGroupsForReplicationSkipWhenNotUsingAllPartitions) {
HloSharding simple_sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(simple_sharding, {1}, 16);
EXPECT_FALSE(actual_partition_group_list.has_value());
}
TEST(SPMDPartitionerUtilTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
IotaReplicaGroupList expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 50)
.iota_replica_group_list()
.value();
EXPECT_EQ(expanded_partition_group_list.num_replica_groups(), 20);
EXPECT_EQ(expanded_partition_group_list.num_devices_per_group(), 5);
EXPECT_THAT(expanded_partition_group_list.reshape_dims(),
testing::ElementsAre(4, 5, 5));
EXPECT_THAT(expanded_partition_group_list.transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilDeathTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
ASSERT_DEATH(
{
auto expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 60);
},
"Check failed: \\(partition_group_count \\* partition_group_size\\) == "
"num_partitions \\(50 vs\\. 60\\)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cd5293f-5da0-4198-a703-16efdd40591a | cpp | tensorflow/tensorflow | collective_permute_motion | third_party/xla/xla/service/spmd/collective_permute_motion.cc | third_party/xla/xla/service/spmd/collective_permute_motion_test.cc | #include "xla/service/spmd/collective_permute_motion.h"
#include <cstdint>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
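// Collects instructions in the while body that are loop-invariant: tuple
// elements passed straight through to the root, constants,
// iota/replica-id/partition-id, and side-effect-free
// elementwise/broadcast/reduce/reshape/dynamic-slice/transpose ops whose
// operands are all invariant.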
absl::flat_hash_set<HloInstruction*> FindLoopConsts(HloComputation* body) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
absl::flat_hash_set<HloInstruction*> loop_consts;
for (int64_t i = 0; i < root->operand_count(); ++i) {
HloInstruction* output = root->mutable_operand(i);
while (output->opcode() == HloOpcode::kReshape ||
output->opcode() == HloOpcode::kCopy) {
output = output->mutable_operand(0);
}
if (output->opcode() == HloOpcode::kGetTupleElement &&
output->tuple_index() == i &&
output->operand(0) == body->parameter_instruction(0)) {
loop_consts.insert(output);
}
}
for (HloInstruction* inst : body->MakeInstructionPostOrder()) {
if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota ||
inst->opcode() == HloOpcode::kReplicaId ||
inst->opcode() == HloOpcode::kPartitionId) {
loop_consts.insert(inst);
continue;
}
if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast &&
inst->opcode() != HloOpcode::kReduce &&
inst->opcode() != HloOpcode::kReshape &&
inst->opcode() != HloOpcode::kDynamicSlice &&
inst->opcode() != HloOpcode::kTranspose) {
continue;
}
if (inst->HasSideEffectNoRecurse()) {
continue;
}
if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) {
return loop_consts.contains(operand);
})) {
loop_consts.insert(inst);
}
}
return loop_consts;
}
constexpr int64_t kMaxMovableClusterSize = 8;
struct MovableCluster {
int64_t root_tuple_index;
std::vector<HloInstruction*> reverse_order_instructions;
HloInstruction* collective_permute = nullptr;
};
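// Finds a small single-use chain of elementwise ops feeding root tuple index
// `root_tuple_index` that contains exactly one collective-permute and whose
// other inputs are loop constants; such a cluster is a candidate to be moved
// out of the loop.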
std::optional<MovableCluster> FindMovableClusterAtBodyRoot(
HloComputation* body, int64_t root_tuple_index,
const absl::flat_hash_set<HloInstruction*>& loop_consts) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
MovableCluster cluster;
cluster.root_tuple_index = root_tuple_index;
std::deque<HloInstruction*> queue;
queue.push_back(root->mutable_operand(root_tuple_index));
while (!queue.empty()) {
HloInstruction* visiting = queue.front();
queue.pop_front();
if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) {
VLOG(2) << "Cannot move: too many instructions to move";
return std::nullopt;
}
if (visiting->user_count() > 1) {
VLOG(2) << "Cannot move: " << visiting->name() << " used multiple times";
return std::nullopt;
}
cluster.reverse_order_instructions.push_back(visiting);
if (visiting->opcode() == HloOpcode::kCollectivePermute) {
if (cluster.collective_permute != nullptr) {
VLOG(2) << "Cannot move: " << visiting->name()
<< " multiple collective permutes";
return std::nullopt;
}
cluster.collective_permute = visiting;
continue;
}
if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) {
VLOG(2) << "Cannot move: " << visiting->name() << " unsupported op";
return std::nullopt;
}
for (HloInstruction* operand : visiting->mutable_operands()) {
if (!loop_consts.contains(operand)) {
queue.push_back(operand);
}
}
}
if (cluster.collective_permute == nullptr) {
return std::nullopt;
}
return cluster;
}
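// Returns the tuple indices of `loop` whose outputs are never read after the
// loop; if the loop has any non-GTE user the result is empty.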
absl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) {
absl::flat_hash_set<int64_t> indices;
int64_t count = loop->shape().tuple_shapes_size();
for (int64_t i = 0; i < count; ++i) {
indices.insert(i);
}
for (HloInstruction* user : loop->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
indices.clear();
break;
}
indices.erase(user->tuple_index());
}
return indices;
}
// For each while-loop output that is dead after the loop, tries to move its
// collective-permute (plus the elementwise ops feeding it) from the end of one
// iteration to the start of the next, selecting the original input on the
// first iteration via a compare against the induction variable's start value.
absl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation,
HloInstruction* loop) {
HloComputation* body = loop->while_body();
HloInstruction* root = body->root_instruction();
if (root->opcode() != HloOpcode::kTuple ||
loop->operand(0)->opcode() != HloOpcode::kTuple) {
return false;
}
auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop);
if (!maybe_induction_var_idx.has_value()) {
VLOG(2) << "Skip " << loop->name() << ", no induction var";
return false;
}
absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts;
for (const HloInstruction* operand : root->operands()) {
auto res = output_appear_counts.emplace(operand, 1);
if (!res.second) {
res.first->second++;
}
}
absl::flat_hash_set<int64_t> unused_indices_after_loop =
FindIndicesUnusedAfterLoop(loop);
const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body);
int64_t induction_var_idx = *maybe_induction_var_idx;
std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr);
absl::flat_hash_set<int64_t> multi_use_indices;
for (HloInstruction* user : body->parameter_instruction(0)->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Skip " << loop->name() << ", non-GTE input use";
return false;
}
if (multi_use_indices.contains(user->tuple_index())) {
continue;
}
if (input_gtes[user->tuple_index()] != nullptr) {
multi_use_indices.insert(user->tuple_index());
input_gtes[user->tuple_index()] = nullptr;
} else {
input_gtes[user->tuple_index()] = user;
}
}
HloInstruction* ind_var = input_gtes[induction_var_idx];
if (ind_var == nullptr || ind_var->shape().rank() > 0) {
VLOG(2) << "Skip " << loop->name() << ", non-scalar induction var";
return false;
}
if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd &&
root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub induction var";
return false;
}
if (root->operand(induction_var_idx)->operand(0) == ind_var) {
if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else if (root->operand(induction_var_idx)->operand(1) == ind_var) {
if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else {
return false;
}
HloInstruction* ind_var_orig =
loop->mutable_operand(0)->mutable_operand(induction_var_idx);
if (!ind_var_orig->IsConstant()) {
VLOG(2) << "Skip " << loop->name()
<< ", non-constant initial induction var";
return false;
}
bool changed = false;
std::vector<MovableCluster> movable_outputs;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (output_appear_counts[root->operand(i)] > 1) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " appears multiple times in output.";
continue;
}
if (!unused_indices_after_loop.contains(i)) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " used after loop.";
continue;
}
auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts);
if (!cluster.has_value()) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " did not find a movable cluster.";
continue;
}
HloInstruction* input = input_gtes[cluster->root_tuple_index];
HloInstruction* cp = cluster->collective_permute;
if (input == nullptr || cp->operand(0) == input) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " collective-permute already at top.";
continue;
}
const std::vector<HloInstruction*> original_input_users = input->users();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement;
replacement[cp->operand(0)] = input;
for (auto it = cluster->reverse_order_instructions.rbegin();
it != cluster->reverse_order_instructions.rend(); ++it) {
HloInstruction* inst = *it;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : inst->mutable_operands()) {
auto rit = replacement.find(operand);
if (rit != replacement.end()) {
new_operands.push_back(rit->second);
} else {
new_operands.push_back(operand);
}
}
HloInstruction* clone = body->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), new_operands));
replacement[inst] = clone;
}
HloInstruction* new_input =
replacement[cluster->reverse_order_instructions[0]];
if (ind_var_orig->parent() != body) {
ind_var_orig = body->AddInstruction(ind_var_orig->Clone());
}
HloInstruction* is_first_iter =
body->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(new_input->shape(), PRED),
body->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig,
Comparison::Direction::kEq)),
{}));
new_input = body->AddInstruction(
HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect,
is_first_iter, input, new_input));
for (HloInstruction* user : original_input_users) {
TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input));
}
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index,
cp->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands(
cluster->reverse_order_instructions[0]));
VLOG(2) << "Moved " << loop->name() << " index " << i;
changed = true;
}
return changed;
}
absl::StatusOr<bool> CollectivePermuteMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool moved,
MoveCollectivePermutes(computation, instr));
changed |= moved;
}
}
}
return changed;
}
} | #include "xla/service/spmd/collective_permute_motion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using CollectivePermuteMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(CollectivePermuteMotionTest, SimpleMove) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto cp = op::CollectivePermute(input);
auto select = op::Select(op::Broadcast(op::Compare()), input, cp);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, NoCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[], f32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[] get-tuple-element(loop_var), index=1
constant.4 = f32[] constant(4.0)
ROOT tuple = (s32[], f32[], f32[]) tuple(add, constant.4, gte1)
}
cond {
loop_var = (s32[], f32[], f32[]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[] parameter(0)
param.1 = f32[] parameter(1)
tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1)
while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, MoveWithElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
broadcast = f32[4,4] broadcast(constant.4), dimensions={}
add1 = f32[4,4] add(cp, broadcast)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto moved =
op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant()));
auto select = op::Select(op::Broadcast(op::Compare()), input, moved);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
nonconst = f32[4,4] custom-call(), custom_call_target="unknown"
add1 = f32[4,4] add(cp, nonconst)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = f32[4,4] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfInductionVarUnknown) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
custom = s32[] custom-call(gte0, constant.1), custom_call_target="unknown"
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp)
}
cond {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param)
while = (s32[], f32[4,4], f32[4,4]) while(tuple.1),
condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
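The tests above drive CollectivePermuteMotion directly through pass.Run(&*module); the fragment below is an editor-added sketch (not part of the TensorFlow sources in this row) showing the equivalent invocation through an HloPassPipeline. The helper name and the pipeline label are assumptions.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/spmd/collective_permute_motion.h"

// Hypothetical helper: run the pass inside a pipeline instead of standalone.
absl::StatusOr<bool> RunCollectivePermuteMotionSketch(xla::HloModule* module) {
  xla::HloPassPipeline pipeline("collective-permute-motion-sketch");
  pipeline.AddPass<xla::CollectivePermuteMotion>();
  return pipeline.Run(module);  // true iff any collective-permute was moved
}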
2d33973b-0f6d-4d9e-933b-378a9e121c71 | cpp | tensorflow/tensorflow | partition_assignment | third_party/xla/xla/service/spmd/partition_assignment.cc | third_party/xla/xla/service/spmd/partition_assignment_test.cc | #include "xla/service/spmd/partition_assignment.h"
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/xla.pb.h"
namespace xla {
PartitioningAlgorithm::PartitioningAlgorithm(AlgorithmKind kind,
int64_t num_partitions) {
kind_ = kind;
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitioningAlgorithm::name() const {
switch (kind_) {
case AlgorithmKind::kNoop:
default:
return "Noop";
}
}
const PartitioningAlgorithm::AlgorithmKind& PartitioningAlgorithm::kind()
const {
return kind_;
}
int64_t PartitioningAlgorithm::num_partitions() const {
return num_partitions_;
}
std::unique_ptr<PartitioningAlgorithm>
PartitioningAlgorithm::CreateNoopPartitioning(int64_t num_partitions) {
return std::make_unique<NoopPartitioning>(num_partitions);
}
NoopPartitioning::NoopPartitioning(int64_t num_partitions)
: PartitioningAlgorithm(AlgorithmKind::kNoop, num_partitions) {
VLOG(2) << "Created a no-op algorithm with the number of partitions: "
<< num_partitions;
}
absl::StatusOr<bool> NoopPartitioning::Run(HloModule* module) const {
VLOG(2) << "No-op algorithm was called to partition module: "
<< module->name();
return false;
}
PartitionAssignment::PartitionAssignment(int64_t num_partitions) {
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitionAssignment::name() const {
return "partitioning-assignment";
}
const PartitioningAlgorithm* PartitionAssignment::algorithm() {
return algorithm_.get();
}
int64_t PartitionAssignment::num_partitions() const { return num_partitions_; }
std::unique_ptr<PartitioningAlgorithm>
PartitionAssignment::ChoosePartitioningAlgorithm(
const HloModule& module) const {
auto algo = module.config().debug_options().xla_partitioning_algorithm();
CHECK_EQ(algo, DebugOptions::PARTITIONING_ALGORITHM_NOOP);
return PartitioningAlgorithm::CreateNoopPartitioning(num_partitions());
}
absl::StatusOr<bool> PartitionAssignment::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Running partition assignment on module " << module->name();
algorithm_ = ChoosePartitioningAlgorithm(*module);
return algorithm()->Run(module);
}
} | #include "xla/service/spmd/partition_assignment.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using PartitionAssignmentTest = HloTestBase;
TEST_F(PartitionAssignmentTest, NoopAlg) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,16]{1,0} parameter(0)
ROOT %copy = f32[16,16]{1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_partitioning_algorithm(
DebugOptions::PARTITIONING_ALGORITHM_NOOP);
PartitionAssignment partition_assignment(16);
EXPECT_EQ(partition_assignment.algorithm(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool changed, partition_assignment.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_NE(partition_assignment.algorithm(), nullptr);
EXPECT_EQ(partition_assignment.algorithm()->kind(),
PartitioningAlgorithm::AlgorithmKind::kNoop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/partition_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/partition_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
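PartitionAssignment picks its algorithm from the module's debug options; the sketch below (editor-added, not part of the sources in this row) shows one plausible way to set that option and run the pass outside the test fixture. The helper name is an assumption; the calls mirror those used in the row above.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/spmd/partition_assignment.h"
#include "xla/xla.pb.h"

// Hypothetical helper: select the no-op algorithm and run PartitionAssignment.
absl::StatusOr<bool> RunNoopPartitionAssignmentSketch(xla::HloModule* module) {
  xla::DebugOptions debug_options = module->config().debug_options();
  debug_options.set_xla_partitioning_algorithm(
      xla::DebugOptions::PARTITIONING_ALGORITHM_NOOP);
  module->mutable_config().set_debug_options(debug_options);
  xla::PartitionAssignment partition_assignment(/*num_partitions=*/16);
  return partition_assignment.Run(module);  // the no-op algorithm reports no change
}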
60aad745-e11c-4fce-abc6-3248231d3e2a | cpp | tensorflow/tensorflow | whole_graph_manual_pass | third_party/xla/xla/service/spmd/whole_graph_manual_pass.cc | third_party/xla/xla/service/spmd/whole_graph_manual_pass_test.cc | #include "xla/service/spmd/whole_graph_manual_pass.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// True if `inst`'s sharding should simply be cleared; parameters, the
// computation root, partition-id, collectives, and side-effecting
// instructions instead get an explicit manual sharding.
bool ShouldClearInstruction(HloInstruction* inst) {
return inst->opcode() != HloOpcode::kParameter &&
inst != inst->parent()->root_instruction() &&
inst->opcode() != HloOpcode::kPartitionId &&
DynCast<HloCollectiveInstruction>(inst) == nullptr &&
!inst->HasSideEffectNoRecurse();
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation) {
bool changed = false;
for (HloInstruction* inst : computation->instructions()) {
if (ShouldClearInstruction(inst)) {
inst->clear_sharding();
changed = true;
continue;
}
if (inst->shape().IsTuple()) {
inst->set_sharding(
HloSharding::SingleTuple(inst->shape(), HloSharding::Manual()));
changed = true;
} else {
inst->set_sharding(HloSharding::Manual());
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> WholeGraphManualPass::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->computations()) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/whole_graph_manual_pass.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class WholeGraphManualPassTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(
auto module,
ParseAndReturnVerifiedModule(
hlo_module,
GetModuleConfigForTest(1, 4)));
HloPassPipeline pipeline("whole-graph-manual-pass");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("whole-graph-manual-pass");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(WholeGraphManualPassTest, SimpleRewrite) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* i : module->entry_computation()->instructions()) {
if (module->entry_computation()->root_instruction() == i) {
EXPECT_THAT(i, op::Sharding("{{manual}, "
"{manual}, {manual}}"));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual}}")));
}
}
}
TEST_F(WholeGraphManualPassTest, SimplePartitionIdCollectives) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
t = token[] after-all()
p = u32[] partition-id()
ag = f32[8] all-gather(val.1), dimensions={0}, replica_groups={{0,1,2,3}}, use_global_device_ids=true, channel_id=1
s = (f32[8], s32[], token[]) send(ag, t), channel_id=2
sd = token[] send-done(s), channel_id=2
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* c : module->computations()) {
for (auto* i : c->instructions()) {
if (c->root_instruction() == i) {
EXPECT_THAT(
i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual},{manual}}"),
op::Sharding("{{manual}, {manual}, {manual}, {manual}}")));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(
i,
AnyOf(op::Sharding("{manual}"), op::Sharding("{{manual},{manual}}"),
op::Sharding("{{manual},{manual},{manual},{manual}}")));
} else if (i->opcode() == HloOpcode::kPartitionId ||
i->opcode() == HloOpcode::kAllGather ||
i->opcode() == HloOpcode::kSendDone) {
EXPECT_THAT(i, op::Sharding("{manual}"));
} else if (i->opcode() == HloOpcode::kSend) {
EXPECT_THAT(i, op::Sharding("{{manual},{manual},{manual}}"));
} else {
EXPECT_FALSE(i->has_sharding());
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/whole_graph_manual_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/whole_graph_manual_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
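WholeGraphManualPass either clears an instruction's sharding or forces it to manual, using HloSharding::SingleTuple for tuple-shaped values; the fragment below is an editor-added sketch that isolates that decision as a standalone helper. The helper name is an assumption; the calls mirror RunOnComputation in the row above.
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_sharding.h"

// Hypothetical helper: attach a manual sharding to one instruction, using a
// tuple of manual shardings when the instruction produces a tuple.
void SetManualShardingSketch(xla::HloInstruction* inst) {
  if (inst->shape().IsTuple()) {
    inst->set_sharding(xla::HloSharding::SingleTuple(inst->shape(),
                                                     xla::HloSharding::Manual()));
  } else {
    inst->set_sharding(xla::HloSharding::Manual());
  }
}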
d5bce155-aab7-45a5-b882-7ba31775153e | cpp | tensorflow/tensorflow | shardy_xla_pass | third_party/xla/xla/service/spmd/shardy/shardy_xla_pass.cc | third_party/xla/xla/service/spmd/shardy/shardy_xla_pass_test.cc | #include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "mhlo/transforms/passes.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "shardy/common/file_utils.h"
#include "shardy/dialect/sdy/transforms/propagation/passes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/map_util.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_export.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include "xla/service/spmd/shardy/sdy_round_trip/pipelines.h"
#include "xla/service/spmd/shardy/utils.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/tsl/framework/mlir/status_scoped_diagnostic_handler.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace sdy {
namespace {
// Recreates every computation in `proto`, adds them to `module` (replacing the
// entry computation), and removes the now-unreachable originals via DCE.
absl::Status createFromProtoAndReplaceComputations(
HloModule* module, const HloModuleProto& proto) {
absl::flat_hash_map<int64_t, HloComputation*> idToComputation;
std::vector<std::unique_ptr<HloComputation>> computations;
HloComputation* entryComputation = nullptr;
for (const HloComputationProto& computationProto : proto.computations()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloComputation> computation,
HloComputation::CreateFromProto(computationProto, idToComputation));
CHECK_NE(computation.get(), nullptr);
const int64_t computationId = computationProto.id();
CHECK_NE(computationId, -1);
CHECK(!ContainsKey(idToComputation, computationId));
idToComputation[computationId] = computation.get();
if (computationId == proto.entry_computation_id()) {
CHECK_EQ(entryComputation, nullptr);
entryComputation = computation.get();
}
computations.push_back(std::move(computation));
}
CHECK_NE(entryComputation, nullptr);
absl::c_sort(computations, [](const std::unique_ptr<HloComputation>& a,
const std::unique_ptr<HloComputation>& b) {
return a->unique_id() < b->unique_id();
});
for (std::unique_ptr<HloComputation>& computation : computations) {
HloComputation* newComputation =
module->AddComputationAndUnifyNamesAndIds(std::move(computation),
false);
if (newComputation == entryComputation) {
module->ReplaceEntryComputation(newComputation);
}
}
CHECK_OK(HloDCE().Run(module));
return absl::OkStatus();
}
using OriginalParamIndexToFlattenedNum =
std::vector<absl::flat_hash_map<ShapeIndex, int64_t>>;
int64_t getFlattenedParamNumber(
const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
int64_t paramNumber, const ShapeIndex& paramIndex) {
return originalParamIndexToFlattenedNum[paramNumber].at(paramIndex);
}
// For every leaf of every original entry-computation parameter, records the
// parameter number it will get after flattening (leaves are numbered in
// parameter-then-leaf order).
OriginalParamIndexToFlattenedNum getOriginalParamIndexToFlattenedNum(
HloModule* hloModule) {
OriginalParamIndexToFlattenedNum originalParamIndexToFlattened;
HloComputation* entryComputation = hloModule->entry_computation();
originalParamIndexToFlattened.reserve(entryComputation->num_parameters());
int64_t paramNumber = 0;
for (HloInstruction* paramInstruction :
entryComputation->parameter_instructions()) {
auto& paramMap = originalParamIndexToFlattened.emplace_back();
ShapeUtil::ForEachLeafShape(paramInstruction->shape(),
[&](const Shape&, const ShapeIndex& index) {
paramMap[index] = paramNumber++;
});
}
return originalParamIndexToFlattened;
}
Shape getFlattenedShape(const Shape& shape) {
std::vector<Shape> flattenedShapes;
ShapeUtil::ForEachLeafShape(
shape, [&](const Shape& subShape, const ShapeIndex& index) {
flattenedShapes.push_back(subShape);
});
if (flattenedShapes.empty()) {
return Shape();
}
return ShapeUtil::MakeMaybeTupleShape(flattenedShapes);
}
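// Illustrative example (editor-added, not in the original file): flattening
// the nested tuple used by the EntryComputationLayoutNestedTuple test below,
// (f32[4,2], (f32[4,2], f32[4,2])) -> (f32[4,2], f32[4,2], f32[4,2]).
[[maybe_unused]] Shape getFlattenedNestedTupleExample() {
  const Shape leaf = ShapeUtil::MakeShape(F32, {4, 2});
  const Shape nested = ShapeUtil::MakeTupleShape(
      {leaf, ShapeUtil::MakeTupleShape({leaf, leaf})});
  return getFlattenedShape(nested);
}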
// Rebuilds the entry computation layout for the flattened module: one
// parameter per leaf shape, or a single tuple parameter holding all leaves
// when `useTupleArgs` is set, plus a flattened result layout.
ComputationLayout getFlattenedComputationLayout(
const ComputationLayout& computationLayout, bool useTupleArgs) {
if (!computationLayout.AnyLayoutSet()) {
return computationLayout;
}
ComputationLayout flattenedComputationLayout = ComputationLayout(
ShapeLayout(getFlattenedShape(computationLayout.result_shape())));
Shape tupleShape;
tupleShape.set_element_type(PrimitiveType::TUPLE);
for (int64_t i = 0; i != computationLayout.parameter_count(); ++i) {
ShapeUtil::ForEachLeafShape(
computationLayout.parameter_shape(i),
[&](const Shape& subShape, const ShapeIndex& index) {
if (useTupleArgs) {
*tupleShape.add_tuple_shapes() = subShape;
} else {
flattenedComputationLayout.add_parameter_layout(
ShapeLayout(subShape));
}
});
}
if (useTupleArgs) {
flattenedComputationLayout.add_parameter_layout(ShapeLayout(tupleShape));
}
return flattenedComputationLayout;
}
std::pair<int64_t, ShapeIndex> getFlattenedParamNumberAndIndex(
const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
int64_t parameterNumber, const ShapeIndex& parameterIndex,
bool useTupleArgs) {
int64_t flattenedIndex = getFlattenedParamNumber(
originalParamIndexToFlattenedNum, parameterNumber, parameterIndex);
if (useTupleArgs) {
return {0, ShapeIndex{flattenedIndex}};
}
return {flattenedIndex, ShapeIndex()};
}
// Translates the input/output alias config to the flattened parameter numbers
// and flat result tuple indices.
HloInputOutputAliasConfig getFlattenedInputOutputAliasConfig(
const HloInputOutputAliasConfig& inputOutputAliasConfig,
const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
bool useTupleArgs) {
HloInputOutputAliasConfig flattenedInputOutputAliasConfig(
getFlattenedShape(inputOutputAliasConfig.shape()));
int64_t resultIndex = 0;
ShapeUtil::ForEachLeafShape(
inputOutputAliasConfig.shape(),
[&](const Shape&, const ShapeIndex& index) {
if (const std::optional<HloInputOutputAliasConfig::Alias>& alias =
inputOutputAliasConfig.GetAliasedParameter(index)) {
auto [paramNumber, paramIndex] = getFlattenedParamNumberAndIndex(
originalParamIndexToFlattenedNum, alias->parameter_number,
alias->parameter_index, useTupleArgs);
CHECK_OK(flattenedInputOutputAliasConfig.SetUpAlias(
flattenedInputOutputAliasConfig.shape().IsTuple()
? ShapeIndex{resultIndex}
: ShapeIndex(),
paramNumber, paramIndex, alias->kind));
}
++resultIndex;
});
return flattenedInputOutputAliasConfig;
}
// Translates buffer donor entries to the flattened parameter numbers.
HloBufferDonorConfig getFlattenedBufferDonorsConfig(
const HloBufferDonorConfig& bufferDonorsConfig,
const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
bool useTupleArgs) {
HloBufferDonorConfig flattenedBufferDonorsConfig;
for (const HloBufferDonorConfig::BufferDonor& bufferDonor :
bufferDonorsConfig.buffer_donor()) {
auto [paramNumber, paramIndex] = getFlattenedParamNumberAndIndex(
originalParamIndexToFlattenedNum, bufferDonor.param_number,
bufferDonor.param_index, useTupleArgs);
CHECK_OK(
flattenedBufferDonorsConfig.AddBufferDonor(paramNumber, paramIndex));
}
return flattenedBufferDonorsConfig;
}
void removeFrontendAttributes(HloModule* hloModule,
mlir::ArrayRef<mlir::StringRef> attributeNames) {
FrontendAttributes feAttrs = hloModule->frontend_attributes();
auto* map = feAttrs.mutable_map();
for (const auto& attributeName : attributeNames) {
map->erase(attributeName);
}
hloModule->set_frontend_attributes(feAttrs);
}
}
absl::StatusOr<bool> ShardyXLA::Run(
HloModule* hloModule,
const absl::flat_hash_set<absl::string_view>& executionThreads) {
LOG(INFO) << "Using Shardy for XLA SPMD propagation.";
auto mlirContext = std::make_unique<mlir::MLIRContext>();
loadAllRequiredDialects(mlirContext.get());
mlir::OwningOpRef<mlir::ModuleOp> mlirModule =
xla::llvm_ir::CreateMlirModuleOp(
mlir::UnknownLoc::get(mlirContext.get()));
TF_RETURN_IF_ERROR(
ConvertHloToMlirHlo(*mlirModule, hloModule,
false,
true));
std::string shardyDir = hloModule->config().debug_options().xla_dump_to();
if (shardyDir == "sponge") {
shardyDir = getenv("TEST_UNDECLARED_OUTPUTS_DIR");
if (shardyDir.empty()) {
LOG(WARNING) << "\"sponge\" specified as dump directory but "
"TEST_UNDECLARED_OUTPUTS_DIR is not set!";
}
}
if (!shardyDir.empty()) {
shardyDir =
tsl::io::JoinPath(shardyDir, "shardy",
std::string_view(mlirModule->getName().value_or("")));
LOG(INFO) << "Using Shardy output directory: " << shardyDir;
}
bool enableVerifier = false;
#ifndef NDEBUG
enableVerifier = true;
#endif
mlir::PassManager pm(mlirContext.get());
pm.enableVerifier(enableVerifier);
pm.addPass(mlir::sdy::createSaveModuleOpPass(shardyDir,
"sdy_module_before_xla_import"));
bool useTupleArgs = false;
mlir::DictionaryAttr moduleFrontendAttrs = getFrontendAttrs(*mlirModule);
if (moduleFrontendAttrs && moduleFrontendAttrs.get(kUseTupleArgs)) {
useTupleArgs = true;
removeFrontendAttribute(*mlirModule, kUseTupleArgs);
}
if (moduleFrontendAttrs &&
moduleFrontendAttrs.get(kPythonIntegrationComplete)) {
removeFrontendAttribute(*mlirModule, kPythonIntegrationComplete);
addSdyRoundTripImportPipeline(pm);
} else {
auto spanToArrayRef = [](absl::Span<const bool> span) {
return mlir::ArrayRef<bool>(span.data(), span.size());
};
addMhloImportPipeline(
pm,
spanToArrayRef(hloModule->config()
.allow_spmd_sharding_propagation_to_parameters()),
spanToArrayRef(
hloModule->config().allow_spmd_sharding_propagation_to_output()));
}
ComputationLayout flattenedEntryComputationLayout =
getFlattenedComputationLayout(hloModule->entry_computation_layout(),
useTupleArgs);
OriginalParamIndexToFlattenedNum originalParamIndexToFlattenedNum =
getOriginalParamIndexToFlattenedNum(hloModule);
HloInputOutputAliasConfig flattenedInputOutputAliasConfig =
getFlattenedInputOutputAliasConfig(hloModule->input_output_alias_config(),
originalParamIndexToFlattenedNum,
useTupleArgs);
HloBufferDonorConfig flattenedBufferDonorsConfig =
getFlattenedBufferDonorsConfig(hloModule->buffer_donor_config(),
originalParamIndexToFlattenedNum,
useTupleArgs);
if (runSdyShardingPropagation) {
pm.addPass(mlir::mhlo::createHloLegalizeToStablehloPass());
mlir::sdy::addPropagationPipeline(
pm, shardyDir,
hloModule->use_auto_spmd_partitioning());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
}
addMhloExportPipeline(pm);
pm.addPass(mlir::sdy::createSaveModuleOpPass(shardyDir,
"sdy_module_after_xla_export"));
tsl::StatusScopedDiagnosticHandler diagnosticHandler(mlirContext.get());
TF_RETURN_IF_ERROR(diagnosticHandler.consumeStatus(pm.run(*mlirModule)));
HloProto hloProto;
TF_RETURN_IF_ERROR(ConvertMlirHloToHlo(*mlirModule, &hloProto, useTupleArgs,
false));
TF_RETURN_IF_ERROR(
createFromProtoAndReplaceComputations(hloModule, hloProto.hlo_module()));
CHECK_OK(TupleSimplifier().Run(hloModule));
*hloModule->mutable_entry_computation_layout() =
flattenedEntryComputationLayout;
hloModule->set_input_output_alias_config(
std::move(flattenedInputOutputAliasConfig));
hloModule->set_buffer_donor_config(std::move(flattenedBufferDonorsConfig));
TF_RETURN_IF_ERROR(
hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
hloModule, true,
true));
removeFrontendAttributes(
hloModule,
{kUseTupleArgs, kPythonIntegrationComplete, kMeshesRoundTripAttr});
return true;
}
}
} | #include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace sdy {
using ShardyXLATest = xla::HloTestBase;
TEST_F(ShardyXLATest, AllowSpmdShardingPropagationParametersOutputRespected) {
const char* const hloString = R"(
HloModule module, allow_spmd_sharding_propagation_to_parameters={false,true}, allow_spmd_sharding_propagation_to_output={true}
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={replicated}
%p1 = f32[8,128,512] parameter(1), sharding={devices=[2,1,1,4]<=[8] last_tile_dim_replicate}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}, sharding={devices=[2,2,2]<=[8]}
ROOT %copy = f32[8,256,128] copy(%dot), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(
module->entry_computation()->parameter_instruction(1),
op::Sharding(
"{devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2]<=[8]}"));
}
TEST_F(ShardyXLATest, ElementWise) {
const char* const hloString = R"(
HloModule module
ENTRY %entry {
p0 = f32[6,3] parameter(0)
p1 = f32[6,3] parameter(1)
copy.p0 = f32[6,3] copy(p0)
copy.p1 = f32[6,3] copy(p1)
add = f32[6,3] add(copy.p0, copy.p1), sharding={devices=[2,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT result = f32[6,3] copy(add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
HloInstruction* add = FindInstruction(module.get(), xla::HloOpcode::kAdd);
EXPECT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[2,1]<=[2]}"));
EXPECT_EQ(add->metadata().op_name(), "simple_example/add");
EXPECT_EQ(add->metadata().source_file(), "source.txt");
EXPECT_EQ(add->metadata().source_line(), 42);
for (HloInstruction* param :
module->entry_computation()->parameter_instructions()) {
EXPECT_THAT(param, op::Sharding("{devices=[2,1]<=[2]}"));
}
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1]<=[2]}"));
auto* copy = FindInstruction(module.get(), xla::HloOpcode::kCopy);
EXPECT_EQ(copy, nullptr);
}
TEST_F(ShardyXLATest, ConstantSplitter) {
const char* const hloString = R"(
HloModule module
ENTRY %constant_splitter {
%p0 = f32[8,8] parameter(0), sharding={devices=[2,2]<=[4]}
%constant = f32[] constant(3.14)
%broadcast = f32[8,16] broadcast(%constant), dimensions={}
%dot = f32[8,8] dot(%broadcast, %broadcast),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %add = f32[8,8] add(%p0, %dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
HloInstruction* dot = FindInstruction(module.get(), xla::HloOpcode::kDot);
EXPECT_EQ(dot->operand_count(), 2);
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kBroadcast);
EXPECT_NE(dot->operand(0), dot->operand(1));
EXPECT_THAT(dot->operand(0),
op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
EXPECT_THAT(
dot->operand(1),
op::Sharding("{devices=[2,1,2]<=[2,2]T(1,0) last_tile_dim_replicate}"));
EXPECT_EQ(dot->operand(0)->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_EQ(dot->operand(1)->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_NE(dot->operand(0)->operand(0), dot->operand(1)->operand(0));
}
TEST_F(ShardyXLATest, Dot) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,128] parameter(0)
%p1 = f32[8,128,512] parameter(1)
%p2 = f32[8,128] parameter(2)
%dot0 = f32[8,512,256] dot(%p1, %p0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={2}
%dot1 = f32[8,256,512] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot2 = f32[8,256] dot(%p0, %p2),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot3 = f32[8,256,512] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1},
sharding={devices=[2,2,2]<=[8]}
ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
tuple(%dot0, %dot1, %dot2, %dot3)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,1,2]<=[8] last_tile_dim_replicate}"));
EXPECT_THAT(
module->entry_computation()->parameter_instruction(1),
op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(2),
op::Sharding("{devices=[2,1,4]<=[8] last_tile_dim_replicate}"));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[2,2,2]<=[2,2,2]T(0,2,1)}, "
"{devices=[2,2,2]<=[8]}, {devices=[2,2,2]<=[8] "
"last_tile_dim_replicate}, {devices=[2,2,2]<=[8]}}"));
}
TEST_F(ShardyXLATest, DotTiledBatchDim) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0)
%p1 = f32[8,512,128] parameter(1)
%add = f32[8,256,512] add(%p0, %p0)
%dot = f32[8,256,128] dot(%add, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%res = f32[8,32768] reshape(%dot), sharding={devices=[2,2]<=[4]}
ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,1]<=[4]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,1,1,2]<=[4] last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2]<=[4]}"));
}
TEST_F(ShardyXLATest, DotMergeOperands1) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,1,2]<=[8] last_tile_dim_replicate}"));
EXPECT_THAT(
module->entry_computation()->parameter_instruction(1),
op::Sharding(
"{devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2]<=[8]}"));
}
TEST_F(ShardyXLATest, DotMergeOperands2) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]<=[8]}
%p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,2]<=[8]}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,2]<=[8]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,2]<=[8]}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,1,4]<=[8] last_tile_dim_replicate}"));
}
TEST_F(ShardyXLATest, DotMergeOperands3) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[256,512] parameter(0), sharding={devices=[2,4]<=[8]}
%p1 = f32[128,512] parameter(1), sharding={devices=[4,2]<=[2,2,2]T(2,1,0)}
%dot = f32[256,128] dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %copy = f32[256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,4]<=[8]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4,2]<=[2,2,2]T(2,1,0)}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,4]<=[2,2,2]T(0,2,1)}"));
}
TEST_F(ShardyXLATest, BackwardDotFromContracting) {
const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]<=[8]}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%dot = f32[8,256,128] dot(%p0, %copy1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,2]<=[8]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,2]<=[8]}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,2,2]<=[8] last_tile_dim_replicate}"));
}
TEST_F(ShardyXLATest, EntryComputationLayoutSingleResult) {
const char* const hloString = R"(
HloModule module, entry_computation_layout={(f32[3,8,32,4]{2,1,3,0:T(8,128)},f32[3,8,32,4]{2,1,3,0:T(8,128)})->f32[3,8,32,4]{2,1,3,0:T(8,128)}}
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%copy.p0 = f32[3,8,32,4] copy(%p0)
%copy.p1 = f32[3,8,32,4] copy(%p1)
%add = f32[3,8,32,4] add(%copy.p0, %copy.p1), sharding={devices=[2,1,1,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT %result = f32[3,8,32,4] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(
module->entry_computation_layout().ToString(),
"(f32[3,8,32,4]{2,1,3,0:T(8,128)}, "
"f32[3,8,32,4]{2,1,3,0:T(8,128)})->f32[3,8,32,4]{2,1,3,0:T(8,128)}");
}
TEST_F(ShardyXLATest, EntryComputationLayoutNestedTuple) {
const char* const hloString = R"(
HloModule module, entry_computation_layout={((f32[4,2]{0,1:T(2,128)},(f32[4,2]{0,1:T(2,128)},f32[4,2]{0,1:T(2,128)})),f32[4,2]{0,1:T(2,128)})->((f32[4,2]{0,1:T(2,128)},(f32[4,2]{0,1:T(2,128)},f32[4,2]{0,1:T(2,128)})),f32[4,2]{0,1:T(2,128)})}
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->entry_computation_layout().ToString(),
"(f32[4,2]{0,1:T(2,128)}, f32[4,2]{0,1:T(2,128)}, "
"f32[4,2]{0,1:T(2,128)}, "
"f32[4,2]{0,1:T(2,128)})->(f32[4,2]{0,1:T(2,128)}, "
"f32[4,2]{0,1:T(2,128)}, f32[4,2]{0,1:T(2,128)}, "
"f32[4,2]{0,1:T(2,128)})");
}
TEST_F(ShardyXLATest, EntryComputationLayoutMissingLayout) {
const char* const hloString = R"(
HloModule module, entry_computation_layout={(f32[3,8,32,4]{2,1,3,0:T(8,128)},f32[3,8,32,4])->f32[3,8,32,4]}
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%copy.p0 = f32[3,8,32,4] copy(%p0)
%copy.p1 = f32[3,8,32,4] copy(%p1)
%add = f32[3,8,32,4] add(%copy.p0, %copy.p1), sharding={devices=[2,1,1,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT %result = f32[3,8,32,4] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->entry_computation_layout().ToString(),
"(f32[3,8,32,4]{2,1,3,0:T(8,128)}, "
"f32[3,8,32,4]{3,2,1,0})->f32[3,8,32,4]{3,2,1,0}");
}
TEST_F(ShardyXLATest, InputOutputAliasConfigSingleResult) {
const char* const hloString = R"(
HloModule module, input_output_alias={ {}: (1, {}, may-alias) }
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%add = f32[3,8,32,4] add(%p0, %p1)
ROOT %result = f32[3,8,32,4] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->input_output_alias_config().ToShortString(),
"{}: (1, {}, may-alias)");
}
TEST_F(ShardyXLATest, InputOutputAliasConfigSingleResultNestedParams) {
const char* const hloString = R"(
HloModule module, input_output_alias={ {}: (0, {1}, may-alias) }
ENTRY %entry {
%p0 = (f32[4,2], f32[4,2]) parameter(0)
%get-tuple-element.0 = f32[4,2] get-tuple-element((f32[4,2], f32[4,2]) %p0), index=0
%get-tuple-element.1 = f32[4,2] get-tuple-element((f32[4,2], f32[4,2]) %p0), index=1
%add = f32[4,2] add(%get-tuple-element.0, %get-tuple-element.1)
ROOT %result = f32[4,2] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->input_output_alias_config().ToShortString(),
"{}: (1, {}, may-alias)");
}
TEST_F(ShardyXLATest, InputOutputAliasConfigNestedResultAndParams) {
const char* const hloString = R"(
HloModule module, input_output_alias={ {0, 1, 0}: (0, {1, 0}, may-alias), {1}: (1, {}, may-alias) }
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->input_output_alias_config().ToShortString(),
"{1}: (1, {}, may-alias), {3}: (3, {}, may-alias)");
}
TEST_F(ShardyXLATest, BufferDonorConfigSingleResult) {
const char* const hloString = R"(
HloModule module, buffer_donor={ (1, {}) }
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%add = f32[3,8,32,4] add(%p0, %p1)
ROOT %result = f32[3,8,32,4] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(1, {})");
}
TEST_F(ShardyXLATest, BufferDonorConfigNestedTuple) {
const char* const hloString = R"(
HloModule module, buffer_donor={ (0, {0}), (0, {1, 1}) }
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(0, {}), (2, {})");
}
TEST_F(ShardyXLATest, ShardingCustomCall) {
const char* const hloString = R"(
HloModule module
ENTRY %main {
%p0 = f32[8,8] parameter(0), sharding={devices=[2,1]<=[2]}
%annotate = f32[8,8] custom-call(%p0), custom_call_target="Sharding",
sharding={devices=[1,2]<=[2]}
ROOT %add = f32[8,8] add(%p0, %annotate)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,1]<=[2]}"));
EXPECT_THAT(module->entry_computation()->root_instruction()->operand(1),
op::Copy());
}
TEST_F(ShardyXLATest, RngBitGenerator) {
const char* const hloString = R"(
HloModule module
ENTRY main {
state.1 = u64[8]{0} parameter(0), sharding={devices=[8,4]<=[32] last_tile_dim_replicate}
state.2 = u64[8]{0} add(state.1, state.1), sharding={devices=[2,16]<=[32] last_tile_dim_replicate}
rng.1 = u32[512,256] rng-bit-generator(state.1), algorithm=rng_default, sharding={devices=[16,2]<=[32]}
rng.2 = (u64[8]{0}, u32[512,256]) rng-bit-generator(state.2), algorithm=rng_default, sharding={{devices=[4,8]<=[32] last_tile_dim_replicate}, {devices=[8,4]<=[32]}}
gte = u32[512,256] get-tuple-element(rng.2), index=1
ROOT tuple = (u32[512,256], u32[512,256]) tuple(rng.1, gte)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[16,2]<=[32]}, {devices=[8,4]<=[32]}}"));
}
TEST_F(ShardyXLATest, WhileWithFreeVariables) {
const char* const hloString = R"(
HloModule main, entry_computation_layout={(f32[32,96]{1,0}, f32[32,96]{1,0})->f32[32,96]{1,0}}
%region_0.7 (arg_tuple.8: (f32[32,96], s32[], s32[], s32[], f32[32,96])) -> (f32[32,96], s32[], s32[], s32[], f32[32,96]) {
%arg_tuple.8 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) parameter(0)
%get-tuple-element.9 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=0
%get-tuple-element.13 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=4
%add.15 = f32[32,96]{1,0} add(f32[32,96]{1,0} %get-tuple-element.9, f32[32,96]{1,0} %get-tuple-element.13), metadata={source_file="-" source_line=25}
%get-tuple-element.10 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=1
%get-tuple-element.12 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=3
%add.14 = s32[] add(s32[] %get-tuple-element.10, s32[] %get-tuple-element.12), metadata={source_file="-" source_line=24}
%get-tuple-element.11 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=2
ROOT %tuple.16 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %add.15, s32[] %add.14, s32[] %get-tuple-element.11, s32[] %get-tuple-element.12, f32[32,96]{1,0} %get-tuple-element.13)
}
%region_1.17 (arg_tuple.18: (f32[32,96], s32[], s32[], s32[], f32[32,96])) -> pred[] {
%arg_tuple.18 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) parameter(0)
%get-tuple-element.19 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=0
%get-tuple-element.22 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=3
%get-tuple-element.23 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=4
%get-tuple-element.20 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=1
%get-tuple-element.21 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=2
ROOT %compare.24 = pred[] compare(s32[] %get-tuple-element.20, s32[] %get-tuple-element.21), direction=LT, metadata={source_file="-" source_line=21}
}
ENTRY %main.30 (Arg_0.1: f32[32,96], Arg_1.2: f32[32,96]) -> f32[32,96] {
%Arg_0.1 = f32[32,96]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
%constant.3 = s32[] constant(0)
%constant.5 = s32[] constant(32)
%constant.4 = s32[] constant(1)
%Arg_1.2 = f32[32,96]{1,0} parameter(1), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%tuple.6 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %Arg_0.1, s32[] %constant.3, s32[] %constant.5, s32[] %constant.4, f32[32,96]{1,0} %Arg_1.2), metadata={source_file="-" source_line=19}
%while.25 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) while((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %tuple.6), condition=%region_1.17, body=%region_0.7, metadata={source_file="-" source_line=19}
%get-tuple-element.27 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %while.25), index=1, metadata={source_file="-" source_line=19}
%get-tuple-element.26 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %while.25), index=0, metadata={source_file="-" source_line=19}
%tuple.28 = (f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %get-tuple-element.26)
ROOT %get-tuple-element.29 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}) %tuple.28), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
HloInstruction* whileInst =
FindInstruction(module.get(), xla::HloOpcode::kWhile);
EXPECT_NE(whileInst, nullptr);
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
EXPECT_THAT(whileInst,
op::Sharding("{{devices=[2,2]<=[4]}, {replicated}, {replicated}, "
"{devices=[2,2]<=[4]}, {replicated}}"));
}
TEST_F(ShardyXLATest, ShardMap) {
const char* const hloString = R"(
HloModule shard_map
region_add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
shmap_body.11 {
Arg_0.12 = f32[2,8] parameter(0)
add.14 = f32[2,8] add(Arg_0.12, Arg_0.12)
Arg_1.13 = f32[8,32] parameter(1)
dot.15 = f32[2,32] dot(add.14, Arg_1.13), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT all-reduce.16 = f32[2,32] all-reduce(dot.15), channel_id=1, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_add
}
ENTRY main {
p0 = f32[8,16] parameter(0)
custom-call.3 = f32[8,16] custom-call(p0), custom_call_target="Sharding", sharding={devices=[4,2]<=[8]}
custom-call.4 = f32[2,8] custom-call(custom-call.3), custom_call_target="SPMDFullToShardShape", sharding={manual}
p1 = f32[16,32] parameter(1)
custom-call.5 = f32[16,32] custom-call(p1), custom_call_target="Sharding", sharding={devices=[2,1,4]<=[4,2]T(1,0) last_tile_dim_replicate}
custom-call.6 = f32[8,32] custom-call(custom-call.5), custom_call_target="SPMDFullToShardShape", sharding={manual}
call.17 = f32[2,32] call(custom-call.4, custom-call.6), to_apply=shmap_body.11
custom-call.18 = f32[2,32] custom-call(call.17), custom_call_target="Sharding", sharding={manual}
ROOT custom-call.19 = f32[8,32] custom-call(custom-call.18), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->computation_count(), 2);
EXPECT_EQ(FindInstruction(module.get(), xla::HloOpcode::kCall), nullptr);
auto* dot = FindInstruction(module.get(), xla::HloOpcode::kDot);
EXPECT_NE(dot, nullptr);
EXPECT_TRUE(dot->has_sharding());
EXPECT_TRUE(dot->sharding().IsManual());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), xla::HloOpcode::kCustomCall);
EXPECT_EQ(root->custom_call_target(), "SPMDShardToFullShape");
EXPECT_THAT(root,
op::Sharding("{devices=[4,1,2]<=[8] last_tile_dim_replicate}"));
}
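// Note on the ShardMap test above: per its expectations, running ShardyXLA
// leaves only two computations and no kCall instruction, i.e. the call into
// shmap_body is folded away, the body's dot carries a manual sharding, and
// the SPMDShardToFullShape custom call remains the root. The exact mechanism
// (presumably a round trip through sdy manual computations) is an assumption,
// not something these checks establish.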
TEST_F(ShardyXLATest, EmptyModule) {
const char* const hloString = R"(
HloModule pjit_f, entry_computation_layout={()->()}, num_partitions=2
ENTRY %main.2 () -> () {
ROOT %tuple.1 = () tuple()
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->entry_computation_layout().ToString(), "()->()");
EXPECT_EQ(module->input_output_alias_config().ToShortString(), "");
}
TEST_F(ShardyXLATest, TestUseTuplesTrue) {
const char* const hloString = R"(
HloModule pjit_f, buffer_donor={ (1, {}) }, input_output_alias={ {}: (2, {}, must-alias) }, entry_computation_layout={(f32[8,16]{1,0:T(8,128)}, f32[16,32]{1,0:T(8,128)}, f32[8,32]{1,0:T(8,128)})->f32[8,32]{1,0:T(8,128)}}, allow_spmd_sharding_propagation_to_parameters={false,false,false}, num_partitions=8, frontend_attributes={xla.sdy.use_tuple_args="t"}
ENTRY %main.7 (Arg_0.1: f32[8,16], Arg_1.2: f32[16,32], Arg_2.3: f32[8,32]) -> f32[8,32] {
%Arg_0.1 = f32[8,16]{1,0} parameter(0)
%Arg_1.2 = f32[16,32]{1,0} parameter(1)
%dot.4 = f32[8,32]{1,0} dot(f32[8,16]{1,0} %Arg_0.1, f32[16,32]{1,0} %Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
%Arg_2.3 = f32[8,32]{1,0} parameter(2)
ROOT %add.5 = f32[8,32]{1,0} add(f32[8,32]{1,0} %dot.4, f32[8,32]{1,0} %Arg_2.3)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_EQ(module->entry_computation()->parameter_instructions().size(), 1);
EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(0, {1})");
EXPECT_EQ(module->input_output_alias_config().ToShortString(),
"{}: (0, {2}, must-alias)");
EXPECT_EQ(module->entry_computation_layout().ToString(),
"((f32[8,16]{1,0:T(8,128)}, f32[16,32]{1,0:T(8,128)}, "
"f32[8,32]{1,0:T(8,128)}))->f32[8,32]{1,0:T(8,128)}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/shardy_xla_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/shardy_xla_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ded5b53-11c9-465a-a926-7211f30afb8b | cpp | tensorflow/tensorflow | mhlo_import | third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.cc | third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import_test.cc | #include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/DialectConversion.h"
#include "shardy/dialect/sdy/ir/constants.h"
#include "shardy/dialect/sdy/ir/dialect.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/translate/mhlo_to_hlo/attribute_exporter.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/shard_map_import.h"
#include "xla/service/spmd/shardy/round_trip_common/pipeline_passes.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace sdy {
namespace {
using ::llvm::SmallDenseMap;
using ::llvm::SmallDenseSet;
using ::mlir::ArrayRef;
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::OpBuilder;
using ::mlir::OperationPass;
using ::mlir::Pass;
using ::mlir::PassWrapper;
using ::mlir::ShapedType;
using ::mlir::SmallVector;
using ::mlir::StringAttr;
using ::mlir::StringRef;
using ::mlir::func::FuncOp;
using ::mlir::sdy::AxisRefAttr;
using ::mlir::sdy::DimensionShardingAttr;
using ::mlir::sdy::kShardingAttr;
using ::mlir::sdy::MeshAttr;
using ::mlir::sdy::MeshAxisAttr;
using ::mlir::sdy::MeshOp;
using ::mlir::sdy::SdyDialect;
using ::mlir::sdy::TensorShardingAttr;
using ::mlir::sdy::TensorShardingPerValueAttr;
struct SubDimInfo {
int64_t tileDimIndex;
int64_t tileSubDimIndex;
int64_t reshapeDimIndex;
int64_t size;
};
struct AnalyzeTileAssignmentResult {
SmallVector<SubDimInfo> subDims;
SmallVector<int64_t> localMesh;
};
}  // namespace
xla::HloSharding parseShardingFromString(const StringAttr& sharding) {
const std::optional<xla::OpSharding> shardingProto =
xla::ConvertSharding(sharding.getValue());
CHECK(shardingProto) << sharding.getValue().str();
absl::StatusOr<HloSharding> hloSharding =
xla::HloSharding::FromProto(*shardingProto);
CHECK_OK(hloSharding) << shardingProto->DebugString();
return *hloSharding;
}
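// Illustrative use, not part of the original source: given a StringAttr that
// carries an HLO sharding string, for example
// StringAttr attr = StringAttr::get(ctx, "{devices=[2,1]<=[2]}");
// xla::HloSharding sharding = parseShardingFromString(attr);
// the function returns the parsed HloSharding and CHECK-fails if the string
// cannot be converted.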
namespace {
SmallVector<int64_t> shortestCommonFactorization(ArrayRef<int64_t> array1,
ArrayRef<int64_t> array2) {
SmallVector<int64_t> result;
result.reserve(std::max(array1.size(), array2.size()));
auto nextIndexWithNonOneElement = [](ArrayRef<int64_t> array,
int64_t index) -> int64_t {
while (index < array.size() && array[index] == 1) {
index++;
}
return index;
};
int64_t index1 = nextIndexWithNonOneElement(array1, 0);
int64_t index2 = nextIndexWithNonOneElement(array2, 0);
int64_t nextStride1 = 1;
int64_t nextStride2 = 1;
int64_t accumulatedFactor = 1;
while (index1 < array1.size() || index2 < array2.size()) {
if (index1 < array1.size() && nextStride1 == accumulatedFactor) {
nextStride1 *= array1[index1++];
}
if (index2 < array2.size() && nextStride2 == accumulatedFactor) {
nextStride2 *= array2[index2++];
}
const auto [smallFactor, largeFactor] = std::minmax(
{nextStride1 / accumulatedFactor, nextStride2 / accumulatedFactor});
if (largeFactor % smallFactor != 0 || smallFactor == 1) {
return {};
}
result.push_back(smallFactor);
accumulatedFactor *= smallFactor;
CHECK_EQ(accumulatedFactor, Product(result));
index1 = nextIndexWithNonOneElement(array1, index1);
index2 = nextIndexWithNonOneElement(array2, index2);
}
return result;
}
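// Illustrative trace, added as an example and not in the original source:
// shortestCommonFactorization({4, 2}, {2, 4}) == {2, 2, 2}
// since each input can be rebuilt by multiplying consecutive factors of the
// result (4 = 2 * 2 then 2, versus 2 then 4 = 2 * 2). When no such common
// refinement exists, the function returns an empty vector.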
SmallVector<SubDimInfo> getOrderedSubDimsFromIotaTileAssignment(
const xla::IotaTileAssignment& iota) {
SmallVector<int64_t> deviceShape(iota.transpose_perm().size());
for (auto [index, perm_i] : llvm::enumerate(iota.transpose_perm())) {
deviceShape[index] = iota.reshape_dims()[perm_i];
}
const SmallVector<int64_t> axisSizes = shortestCommonFactorization(
ArrayRef<int64_t>(iota.dims().begin(), iota.dims().end()), deviceShape);
if (axisSizes.empty()) {
return {};
}
SmallVector<SubDimInfo> subDims;
subDims.reserve(axisSizes.size());
int64_t tileDimIndex = iota.ndims() - 1;
int64_t transPermIndex = iota.transpose_perm().size() - 1;
int64_t accTileSize = 1;
int64_t accDeviceSize = 1;
int64_t subDim = 0;
for (const int64_t axisSize : llvm::reverse(axisSizes)) {
while (iota.dim(tileDimIndex) == 1) {
tileDimIndex--;
}
subDims.push_back(SubDimInfo{
tileDimIndex,
subDim++,
iota.transpose_perm()[transPermIndex],
axisSize,
});
accTileSize *= axisSize;
accDeviceSize *= axisSize;
if (iota.dim(tileDimIndex) == accTileSize) {
tileDimIndex--;
accTileSize = 1;
subDim = 0;
}
if (deviceShape[transPermIndex] == accDeviceSize) {
accDeviceSize = 1;
transPermIndex--;
}
}
absl::c_sort(subDims, [](const SubDimInfo& a, const SubDimInfo& b) {
return std::forward_as_tuple(a.reshapeDimIndex, a.tileDimIndex) <
std::forward_as_tuple(b.reshapeDimIndex, b.tileDimIndex);
});
return subDims;
}
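// Note: the helper above decomposes an iota tile assignment into per-dimension
// sub-dimensions (SubDimInfo) by matching the tile dims against the permuted
// reshape dims via shortestCommonFactorization, and returns them sorted by
// (reshapeDimIndex, tileDimIndex); an empty result means no common
// factorization was found.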
AnalyzeTileAssignmentResult analyzeTileAssignment(
const xla::TileAssignment& tileAssignment) {
const std::optional<IotaTileAssignment>& iota = tileAssignment.iota();
CHECK(iota.has_value()) << "tile assignment: " << tileAssignment.ToString();
const SmallVector<SubDimInfo> subDims =
getOrderedSubDimsFromIotaTileAssignment(*iota);
CHECK(!subDims.empty()) << "tile assignment: " << tileAssignment.ToString();
SmallVector<int64_t> mesh;
mesh.reserve(subDims.size());
for (SubDimInfo subDimInfo : subDims) {
mesh.push_back(subDimInfo.size);
}
return AnalyzeTileAssignmentResult{
std::move(subDims),
std::move(mesh),
};
}
absl::flat_hash_set<xla::HloSharding> collectXlaHloShardings(
ModuleOp moduleOp) {
absl::flat_hash_set<xla::HloSharding> oldShardings;
for (FuncOp funcOp : moduleOp.getOps<FuncOp>()) {
for (int64_t argNum = 0; argNum < funcOp.getNumArguments(); ++argNum) {
if (auto oldSharding =
funcOp.getArgAttrOfType<StringAttr>(argNum, kXlaShardingAttr)) {
oldShardings.insert(parseShardingFromString(oldSharding));
}
}
for (int64_t resNum = 0; resNum < funcOp.getNumResults(); ++resNum) {
if (auto oldSharding = funcOp.getResultAttrOfType<StringAttr>(
resNum, kXlaShardingAttr)) {
oldShardings.insert(parseShardingFromString(oldSharding));
}
}
funcOp.front().walk([&](mlir::Operation* op) {
if (auto oldSharding = op->getAttrOfType<StringAttr>(kXlaShardingAttr)) {
const xla::HloSharding hloSharding =
parseShardingFromString(oldSharding);
if (hloSharding.IsTuple()) {
for (const xla::HloSharding& element : hloSharding.tuple_elements()) {
oldShardings.insert(element);
}
} else {
oldShardings.insert(hloSharding);
}
}
});
}
return oldShardings;
}
struct MeshAxesAndIds {
SmallVector<MeshAxisAttr> namedAxes;
SmallVector<int64_t> maximalDeviceIds;
};
MeshAxesAndIds findMeshAxesAndIds(ModuleOp moduleOp) {
MeshAxesAndIds result;
auto& [namedAxes, maximalDeviceIds] = result;
const absl::flat_hash_set<xla::HloSharding> oldShardings =
collectXlaHloShardings(moduleOp);
SmallVector<int64_t> axes;
llvm::SmallDenseSet<int64_t> maximalDeviceIdSet;
for (const xla::HloSharding& hloSharding : oldShardings) {
if (hloSharding.HasUniqueDevice()) {
maximalDeviceIdSet.insert(hloSharding.GetUniqueDevice());
continue;
}
CHECK(!hloSharding.IsTuple());
if (hloSharding.IsReplicated() || hloSharding.IsManual() ||
hloSharding.IsUnknown()) {
continue;
}
CHECK(hloSharding.IsTiled());
const AnalyzeTileAssignmentResult result =
analyzeTileAssignment(hloSharding.tile_assignment());
axes = (axes.empty()) ? result.localMesh
: shortestCommonFactorization(result.localMesh, axes);
CHECK(!axes.empty());
}
namedAxes.reserve(axes.size());
for (auto [axisIndex, axisSize] : llvm::enumerate(axes)) {
auto name = StringAttr::get(moduleOp->getContext(),
absl::StrCat("axis_", axisIndex));
namedAxes.push_back(
MeshAxisAttr::get(moduleOp->getContext(), name, axisSize));
}
maximalDeviceIds = llvm::to_vector(maximalDeviceIdSet);
llvm::sort(maximalDeviceIds);
return result;
}
}  // namespace
TensorShardingAttr convertToSdySharding(
const xla::HloSharding& hloSharding, MeshAttr globalMesh,
const SmallDenseMap<int64_t, StringRef>& deviceIdToMaximalMeshName,
int64_t rank, bool openDims) {
mlir::MLIRContext* ctx = globalMesh.getContext();
if (hloSharding.HasUniqueDevice()) {
return TensorShardingAttr::getFullyClosed(
ctx, rank,
deviceIdToMaximalMeshName.lookup(hloSharding.GetUniqueDevice()));
}
CHECK(!hloSharding.IsTuple());
if (hloSharding.IsReplicated() || hloSharding.IsManual() ||
hloSharding.IsUnknown()) {
return hloSharding.IsUnknown() || openDims
? TensorShardingAttr::getFullyOpen(ctx, rank, kGlobalMeshName)
: TensorShardingAttr::getFullyClosed(ctx, rank, kGlobalMeshName);
}
CHECK(hloSharding.IsTiled());
const AnalyzeTileAssignmentResult result =
analyzeTileAssignment(hloSharding.tile_assignment());
SmallVector<SmallVector<AxisRefAttr>> localAxisIndexToGlobalAxes;
localAxisIndexToGlobalAxes.reserve(result.localMesh.size());
int64_t globalAxisIndex = 0;
for (int64_t localAxisSize : result.localMesh) {
SmallVector<AxisRefAttr>& globalAxes =
localAxisIndexToGlobalAxes.emplace_back();
int64_t product = 1;
while (product < localAxisSize) {
MeshAxisAttr axisAttr = globalMesh.getAxes()[globalAxisIndex++];
if (axisAttr.getSize() == 1) {
continue;
}
globalAxes.push_back(AxisRefAttr::get(ctx, axisAttr.getName()));
product *= axisAttr.getSize();
}
CHECK_EQ(product, localAxisSize);
}
SmallVector<SmallVector<int64_t>> dimToSubDimToLocalAxisIndex(rank);
for (auto [localAxisIndex, subDimInfo] : llvm::enumerate(result.subDims)) {
if (subDimInfo.tileDimIndex >= rank) {
continue;
}
SmallVector<int64_t>& subDimToLocalAxisIndex =
dimToSubDimToLocalAxisIndex[subDimInfo.tileDimIndex];
if (subDimInfo.tileSubDimIndex >= subDimToLocalAxisIndex.size()) {
subDimToLocalAxisIndex.resize(subDimInfo.tileSubDimIndex + 1);
}
subDimToLocalAxisIndex[subDimInfo.tileSubDimIndex] = localAxisIndex;
}
SmallVector<DimensionShardingAttr> dimShardings;
dimShardings.reserve(rank);
for (ArrayRef<int64_t> subDimToLocalAxisIndex : dimToSubDimToLocalAxisIndex) {
SmallVector<AxisRefAttr> axes;
for (int64_t localAxisIndex : llvm::reverse(subDimToLocalAxisIndex)) {
absl::c_copy(localAxisIndexToGlobalAxes[localAxisIndex],
std::back_inserter(axes));
}
dimShardings.push_back(
DimensionShardingAttr::get(ctx, axes, !openDims));
}
return TensorShardingAttr::get(ctx, StringAttr::get(ctx, kGlobalMeshName),
dimShardings, {});
}
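// Illustrative example, grounded in mhlo_import_test.cc later in this
// document: with a mesh of axes {x: 1, y: 4, z: 2} and
// xla::HloSharding::IotaTile({4, 2}), convertToSdySharding(..., rank = 2,
// openDims = true) prints as
// #sdy.sharding<@mesh, [{"y", ?}, {"z", ?}]>
// i.e. size-1 mesh axes are skipped and each tile dimension is assigned the
// matching global axes.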
namespace {
bool shouldOpenDims(ArrayRef<bool> allowPropagationToTensors, int64_t index) {
if (allowPropagationToTensors.empty()) {
return false;
}
if (allowPropagationToTensors.size() == 1) {
return allowPropagationToTensors.front();
}
CHECK_LT(index, allowPropagationToTensors.size());
return allowPropagationToTensors[index];
}
LogicalResult importShardings(
FuncOp funcOp, MeshAttr globalMesh,
const SmallDenseMap<int64_t, StringRef>& deviceIdToMaximalMeshName,
ArrayRef<bool> allowPropagationToArgs,
ArrayRef<bool> allowPropagationToResults) {
for (auto [argNum, argType] : llvm::enumerate(funcOp.getArgumentTypes())) {
if (auto oldSharding =
funcOp.getArgAttrOfType<StringAttr>(argNum, kXlaShardingAttr)) {
funcOp.setArgAttr(
argNum, kShardingAttr,
convertToSdySharding(parseShardingFromString(oldSharding), globalMesh,
deviceIdToMaximalMeshName,
mlir::cast<ShapedType>(argType).getRank(),
shouldOpenDims(allowPropagationToArgs, argNum)));
funcOp.removeArgAttr(argNum, kXlaShardingAttr);
}
}
for (auto [resNum, resType] : llvm::enumerate(funcOp.getResultTypes())) {
if (auto oldSharding =
funcOp.getResultAttrOfType<StringAttr>(resNum, kXlaShardingAttr)) {
funcOp.setResultAttr(
resNum, kShardingAttr,
convertToSdySharding(
parseShardingFromString(oldSharding), globalMesh,
deviceIdToMaximalMeshName,
mlir::cast<ShapedType>(resType).getRank(),
shouldOpenDims(allowPropagationToResults, resNum)));
funcOp.removeResultAttr(
resNum, StringAttr::get(funcOp.getContext(), kXlaShardingAttr));
}
}
funcOp.front().walk([&](mlir::Operation* op) {
if (auto oldSharding = op->getAttrOfType<StringAttr>(kXlaShardingAttr)) {
const xla::HloSharding hloSharding = parseShardingFromString(oldSharding);
ArrayRef<xla::HloSharding> flatHloSharding = hloSharding;
if (hloSharding.IsTuple()) {
flatHloSharding = hloSharding.tuple_elements();
}
SmallVector<TensorShardingAttr> newShardings;
newShardings.reserve(op->getNumResults());
for (const auto& [resHloSharding, resType] :
llvm::zip_equal(flatHloSharding, op->getResultTypes())) {
newShardings.push_back(convertToSdySharding(
resHloSharding, globalMesh, deviceIdToMaximalMeshName,
mlir::cast<ShapedType>(resType).getRank(),
false));
}
op->setAttr(kShardingAttr, TensorShardingPerValueAttr::get(
globalMesh.getContext(), newShardings));
op->removeAttr(kXlaShardingAttr);
}
});
return mlir::success();
}
class ImportShardingsPass
: public PassWrapper<ImportShardingsPass, OperationPass<ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ImportShardingsPass)
ImportShardingsPass(ArrayRef<bool> allowPropagationToArgs,
ArrayRef<bool> allowPropagationToResults)
: allowPropagationToArgs(allowPropagationToArgs),
allowPropagationToResults(allowPropagationToResults) {}
void runOnOperation() final {
ModuleOp moduleOp = getOperation();
auto [namedAxes, deviceIdsForMaximalMesh] = findMeshAxesAndIds(moduleOp);
if (namedAxes.empty() && deviceIdsForMaximalMesh.empty()) {
return;
}
mlir::SymbolTableCollection symbolTableCollection;
mlir::SymbolTable& symbolTable =
symbolTableCollection.getSymbolTable(moduleOp);
OpBuilder opBuilder = mlir::OpBuilder::atBlockBegin(moduleOp.getBody());
symbolTable.insert(opBuilder.create<MeshOp>(
moduleOp.getLoc(), kGlobalMeshName,
MeshAttr::get(moduleOp.getContext(), namedAxes)));
SmallDenseMap<int64_t, StringRef> deviceIdToMaximalMeshName;
for (int64_t deviceId : deviceIdsForMaximalMesh) {
std::string meshName = absl::StrCat("maximal_mesh_", deviceId);
auto meshOp = opBuilder.create<MeshOp>(
moduleOp.getLoc(), meshName,
MeshAttr::get(moduleOp.getContext(), deviceId));
symbolTable.insert(meshOp);
deviceIdToMaximalMeshName[deviceId] = meshOp.getSymName();
}
for (FuncOp funcOp : moduleOp.getOps<FuncOp>()) {
bool isMain = funcOp.getSymName() == "main";
MeshAttr globalMesh = MeshAttr::get(moduleOp.getContext(), namedAxes);
if (mlir::failed(importShardings(
funcOp, globalMesh, deviceIdToMaximalMeshName,
isMain ? allowPropagationToArgs : ArrayRef<bool>(),
isMain ? allowPropagationToResults : ArrayRef<bool>()))) {
signalPassFailure();
}
}
}
StringRef getArgument() const override { return "xla-sdy-import-shardings"; }
StringRef getDescription() const override {
return "Builds the mesh and converts the shardings from kXlaShardingAttr "
"to kShardingAttr.";
}
void getDependentDialects(mlir::DialectRegistry& registry) const final {
registry.insert<SdyDialect>();
}
private:
ArrayRef<bool> allowPropagationToArgs;
ArrayRef<bool> allowPropagationToResults;
};
std::unique_ptr<mlir::Pass> createImportShardingsPass(
ArrayRef<bool> allowPropagationToArgs,
ArrayRef<bool> allowPropagationToResults) {
return std::make_unique<ImportShardingsPass>(allowPropagationToArgs,
allowPropagationToResults);
}
}  // namespace
void registerMhloImportShardingsPass() {
mlir::registerPass(
std::bind(createImportShardingsPass, ArrayRef<bool>(), ArrayRef<bool>()));
}
void addMhloImportPipeline(mlir::OpPassManager& pm,
ArrayRef<bool> allowPropagationToArgs,
ArrayRef<bool> allowPropagationToResults) {
addCommonPreImportPasses(pm);
pm.addPass(createImportShardingsPass(allowPropagationToArgs,
allowPropagationToResults));
pm.addPass(createMhloRoundTripShardMapImportPass());
addCommonPostImportPasses(pm);
}
void registerMhloImportPipeline() {
mlir::PassPipelineRegistration<> importPipeline(
"xla-sdy-mhlo-import-pipeline",
"Run passes to import an mhlo module with `mhlo.shardings` into the SDY "
"(Shardy) dialect.",
std::bind(addMhloImportPipeline, std::placeholders::_1, ArrayRef<bool>(),
ArrayRef<bool>()));
}
}  // namespace sdy
} | #include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "shardy/dialect/sdy/ir/dialect.h"
#include "shardy/dialect/sdy/ir/register.h"
#include "shardy/dialect/sdy/ir/utils.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "tsl/platform/test.h"
namespace mlir::sdy {
namespace {
TEST(MhloImportTest, SkipFirstAxisOfSize1) {
MLIRContext context;
loadAllRequiredDialects(&context);
SmallVector<sdy::MeshAxisAttr> axes;
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "x", 1));
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "y", 4));
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "z", 2));
auto mesh = sdy::MeshAttr::get(&context, axes);
TensorShardingAttr sharding = xla::sdy::convertToSdySharding(
xla::HloSharding::IotaTile({4, 2}),
mesh,
llvm::SmallDenseMap<int64_t, mlir::StringRef>(), 2,
true);
EXPECT_EQ(attributeToString(sharding),
"#sdy.sharding<@mesh, [{\"y\", ?}, {\"z\", ?}]>");
}
TEST(MhloImportTest, SkipSecondAxisOfSize1) {
MLIRContext context;
loadAllRequiredDialects(&context);
SmallVector<sdy::MeshAxisAttr> axes;
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "y", 4));
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "x", 1));
axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "z", 2));
auto mesh = sdy::MeshAttr::get(&context, axes);
TensorShardingAttr sharding = xla::sdy::convertToSdySharding(
xla::HloSharding::IotaTile({4, 2}),
mesh,
llvm::SmallDenseMap<int64_t, mlir::StringRef>(), 2,
true);
EXPECT_EQ(attributeToString(sharding),
"#sdy.sharding<@mesh, [{\"y\", ?}, {\"z\", ?}]>");
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f347f78b-eaa6-4792-ac4e-ae5992680692 | cpp | tensorflow/tensorflow | memory_space_assignment | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment.cc | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment_test.cc | #include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/simulator.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
absl::Status InsertInstructionAndEnsureOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions);
absl::Status EnsureInstructionAndOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
if (inserted_instructions->contains(new_instruction)) {
return absl::OkStatus();
}
return InsertInstructionAndEnsureOperandsInserted(
new_instruction, new_sequence, inserted_instructions);
}
absl::Status InsertInstructionAndEnsureOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
for (HloInstruction* operand : new_instruction->operands()) {
TF_RETURN_IF_ERROR(EnsureInstructionAndOperandsInserted(
operand, new_sequence, inserted_instructions));
}
VLOG(4) << "inserting: " << new_instruction->ToShortString();
new_sequence->push_back(new_instruction);
TF_RET_CHECK(inserted_instructions->insert(new_instruction).second);
return absl::OkStatus();
}
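// Note: the two mutually recursive helpers above insert an instruction into
// the rebuilt schedule only after all of its operands have been inserted, so
// operands always precede their users in the new sequence.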
std::string InstructionScheduleToString(const HloLiveRange& hlo_live_range) {
const absl::flat_hash_map<const HloInstruction*, HloLiveRange::LogicalTime>&
instruction_schedule = hlo_live_range.instruction_schedule();
std::vector<std::pair<int64_t, const HloInstruction*>> instructions;
instructions.reserve(instruction_schedule.size());
for (const auto& instruction : instruction_schedule) {
instructions.push_back({instruction.second, instruction.first});
}
std::string instruction_schedule_str = "\n";
absl::c_sort(instructions);
for (auto& instruction : instructions) {
absl::StrAppend(&instruction_schedule_str,
"LogicalTime: ", instruction.first, " ",
instruction.second->ToString(), "\n");
}
return instruction_schedule_str;
}
void EnsureParentAllocationIsAvailableForCopy(CopyAllocation* copy_allocation) {
Allocation& parent_allocation = copy_allocation->mutable_prev_allocation();
parent_allocation.Extend(copy_allocation->copy_done_schedule_before());
if (parent_allocation.is_copy_allocation()) {
auto parent_copy_allocation =
tensorflow::down_cast<CopyAllocation*>(&parent_allocation);
parent_copy_allocation->set_copy_done_schedule_before(
std::min(parent_copy_allocation->copy_done_schedule_before(),
copy_allocation->start_time()));
parent_copy_allocation->set_copy_start_schedule_after(
std::min(parent_copy_allocation->copy_start_schedule_after(),
parent_copy_allocation->copy_done_schedule_before() - 1));
}
}
void MakeCopyAllocationJitForSingleUse(CopyAllocation* copy_allocation,
int64_t use_time) {
copy_allocation->set_start_time(use_time - 1);
copy_allocation->set_copy_start_schedule_after(use_time - 1);
copy_allocation->set_end_time(use_time);
copy_allocation->set_copy_done_schedule_before(use_time);
EnsureParentAllocationIsAvailableForCopy(copy_allocation);
}
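// Note: MakeCopyAllocationJitForSingleUse (above) repositions a copy so that
// it starts one step before its single use and completes at the use, then
// extends the parent allocation so the source data stays live for the copy.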
int64_t GetUseTime(const HloUse& use, const HloLiveRange& hlo_live_range) {
return hlo_live_range.instruction_schedule().at(use.instruction);
}
void ProcessPrefetchesToAlternateMemory(AllocationSequence& allocations,
const HloLiveRange& hlo_live_range) {
std::vector<Allocation*> allocations_in_raw_pointers =
GetAllocationSequenceInRawPointers(allocations);
for (auto allocation : allocations_in_raw_pointers) {
if (allocation->is_copy_allocation() && allocation->is_in_alternate_mem() &&
!allocation->uses().empty()) {
CopyAllocation* prefetch =
tensorflow::down_cast<CopyAllocation*>(allocation);
std::vector<HloUse> uses = prefetch->uses();
prefetch->clear_uses();
prefetch->AddUse(uses[0]);
MakeCopyAllocationJitForSingleUse(prefetch,
GetUseTime(uses[0], hlo_live_range));
for (size_t use_index = 1; use_index < uses.size(); ++use_index) {
const HloUse& use = uses[use_index];
int64_t use_time = GetUseTime(use, hlo_live_range);
auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
prefetch->mutable_prev_allocation(), MemorySpace::kAlternate,
prefetch->chunk(), use_time - 1, use_time, use_time);
jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
jit_single_use_prefetch->AddUse(use);
EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
allocations.push_back(std::move(jit_single_use_prefetch));
}
}
}
}
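// Note: for a prefetch into alternate memory with several uses, the first use
// keeps the original CopyAllocation (made just-in-time above) and every
// additional use gets its own single-use CopyAllocation scheduled one step
// before that use, all copying from the same parent allocation.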
void MakeEvictionImmediate(CopyAllocation* eviction) {
const Allocation& parent_allocation = eviction->prev_allocation();
eviction->set_start_time(parent_allocation.start_time());
eviction->set_copy_start_schedule_after(parent_allocation.start_time());
eviction->set_copy_done_schedule_before(parent_allocation.start_time() + 1);
eviction->Extend(parent_allocation.start_time() + 1);
}
absl::flat_hash_map<Allocation*, CopyAllocation*> GetEvictionsMap(
std::vector<Allocation*>& allocations) {
absl::flat_hash_map<Allocation*, CopyAllocation*> evictions_map;
for (auto& allocation : allocations) {
if (allocation->is_copy_allocation() && allocation->is_in_default_mem()) {
auto eviction = tensorflow::down_cast<CopyAllocation*>(allocation);
Allocation& parent_allocation = eviction->mutable_prev_allocation();
if (!parent_allocation.is_copy_allocation()) {
evictions_map[&parent_allocation] = eviction;
}
}
}
return evictions_map;
}
void ProcessBuffersProducedInAlternateMemory(
AllocationSequence& allocations, const HloLiveRange& hlo_live_range) {
std::vector<Allocation*> allocations_in_raw_pointers =
GetAllocationSequenceInRawPointers(allocations);
absl::flat_hash_map<Allocation*, CopyAllocation*> evictions_map =
GetEvictionsMap(allocations_in_raw_pointers);
for (auto& [_, eviction] : evictions_map) {
MakeEvictionImmediate(eviction);
}
if (VLOG_IS_ON(2)) {
LOG(INFO) << "AllocationSequence after making spills immediate spills\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
for (auto allocation : allocations_in_raw_pointers) {
if (!allocation->is_copy_allocation() &&
allocation->is_in_alternate_mem()) {
std::vector<HloUse> uses = allocation->uses();
allocation->clear_uses();
allocation->set_end_time(allocation->start_time() + 1);
for (const HloUse& use : uses) {
int64_t use_time = GetUseTime(use, hlo_live_range);
if (allocation->start_time() + 1 == use_time) {
allocation->AddUse(use);
continue;
}
if (!evictions_map.contains(allocation)) {
auto eviction_unique_ptr = std::make_unique<CopyAllocation>(
*allocation, MemorySpace::kDefault, std::nullopt,
allocation->start_time(), allocation->start_time() + 1,
allocation->start_time() + 1);
eviction_unique_ptr->set_copy_start_schedule_after(
allocation->start_time());
evictions_map[allocation] = eviction_unique_ptr.get();
allocations.push_back(std::move(eviction_unique_ptr));
}
CopyAllocation* eviction = evictions_map[allocation];
auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
*eviction, MemorySpace::kAlternate, allocation->chunk(),
use_time - 1, use_time, use_time);
jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
jit_single_use_prefetch->AddUse(use);
EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
allocations.push_back(std::move(jit_single_use_prefetch));
}
}
}
}
void TransformAllocationSequenceToSpill(AllocationSequence& allocations,
const HloLiveRange& hlo_live_range) {
if (VLOG_IS_ON(2)) {
LOG(INFO) << "InstructionSchedule before transform\n";
XLA_LOG_LINES(INFO, InstructionScheduleToString(hlo_live_range));
LOG(INFO) << "AllocationSequence before transform\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
ProcessPrefetchesToAlternateMemory(allocations, hlo_live_range);
if (VLOG_IS_ON(2)) {
LOG(INFO) << "AllocationSequence after processing prefetches\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
ProcessBuffersProducedInAlternateMemory(allocations, hlo_live_range);
if (VLOG_IS_ON(2)) {
VLOG(2) << "AllocationSequence after processing buffers produced in kAlt\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
SortAllocationSequence(allocations);
}
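// Note on TransformAllocationSequenceToSpill (above): under the
// always_spill_to_default_memory option, prefetches become just-in-time,
// buffers produced in alternate memory are evicted immediately after being
// defined and re-prefetched right before each later use, and the allocation
// sequence is re-sorted at the end.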
}  // namespace
absl::StatusOr<MemorySpaceAssignment::AsyncCopyStats>
MemorySpaceAssignment::CalculateAsyncCopyStats() const {
AsyncCopyStats stats;
int64_t current_copies = 0;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module_));
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart ||
(instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice)) {
current_copies++;
} else if (instruction->opcode() == HloOpcode::kCopyDone ||
(instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice)) {
current_copies--;
int64_t size =
options_.size_fn(dataflow_analysis->GetUniqueValueAt(instruction));
if (instruction->shape().layout().memory_space() ==
options_.alternate_memory_space) {
++stats.num_prefetches;
stats.prefetch_bytes += size;
if (instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice) {
++stats.num_sliced_prefetch_slices;
}
} else {
++stats.num_evictions;
stats.eviction_bytes += size;
}
} else if (instruction->IsCustomCall(kConcatBitcastCustomCall)) {
++stats.num_sliced_prefetches;
}
stats.max_outstanding_async_copies =
std::max(stats.max_outstanding_async_copies, current_copies);
}
}
return stats;
}
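// Note: the statistics above count CopyStart/CopyDone pairs as well as async
// slice start/done pairs; a completed copy counts as a prefetch or an
// eviction depending on whether the destination shape lives in the alternate
// memory space, and a sliced prefetch is recognized by the concat-bitcast
// custom call.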
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::Run(HloModule* module,
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis,
const Options& options) {
CHECK(module->has_schedule());
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Module before memory space assignment: ";
XLA_LOG_LINES(INFO, module->ToString());
LOG(INFO) << "Schedule: " << module->schedule().ToString();
}
MemorySpaceAssignment memory_space_assignment(module, options,
hlo_live_range);
return memory_space_assignment.RunMemorySpaceAssignment(hlo_live_range,
alias_analysis);
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::RunMemorySpaceAssignment(
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis) {
TF_RETURN_IF_ERROR(FindAllocationSequence(hlo_live_range, alias_analysis));
std::optional<RuntimeSimulator> runtime_simulator = std::nullopt;
if (options_.cost_analysis) {
runtime_simulator.emplace(options_.cost_analysis,
options_.alternate_memory_space);
float estimated_time =
runtime_simulator->SimulateElapsedTimeWithoutAsyncCopyLikes(
hlo_live_range, allocations_);
VLOG(1) << "Estimated elapsed time without async copies (sec): "
<< estimated_time;
}
TF_RETURN_IF_ERROR(Process(hlo_live_range));
ScheduleAsynchronousCopies();
TF_RETURN_IF_ERROR(SimplifyGraph());
TF_RETURN_IF_ERROR(FixSchedule());
TF_RETURN_IF_ERROR(ExportAndColorBuffers());
if (runtime_simulator.has_value()) {
float estimated_time =
runtime_simulator->SimulateElapsedTime(module_, allocations_);
VLOG(1) << "Estimated elapsed time with async copies (sec): "
<< estimated_time;
}
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Module after memory space assignment: ";
XLA_LOG_LINES(INFO, module_->ToString());
}
TF_CHECK_OK(module_->schedule().Verify());
TF_ASSIGN_OR_RETURN(AsyncCopyStats stats, CalculateAsyncCopyStats());
VLOG(1) << "Maximum number of outstanding async copies/slices: "
<< stats.max_outstanding_async_copies;
VLOG(1) << "Number of prefetches: " << stats.num_prefetches
<< ", in bytes: " << stats.prefetch_bytes;
VLOG(1) << "Number of sliced prefetches: " << stats.num_sliced_prefetches
<< ", consuming number of slices: "
<< stats.num_sliced_prefetch_slices;
VLOG(1) << "Number of evictions: " << stats.num_evictions
<< ", in bytes: " << stats.eviction_bytes;
TF_RETURN_IF_ERROR(VerifyAndExportHeapSimulatorTrace());
return std::move(preset_assignments_);
}
absl::Status MemorySpaceAssignment::FindAllocationSequence(
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis) {
auto algorithm = std::make_unique<MsaAlgorithm>(
&allocations_, options_, alias_analysis, hlo_live_range);
HeapSimulator::Options heap_simulator_options;
heap_simulator_options.may_reuse_operand_buffers = false;
heap_simulator_options.alloc_constants = true;
TF_RETURN_IF_ERROR(HeapSimulator::Run(std::move(algorithm), *module_,
module_->schedule(), alias_analysis,
options_.size_fn,
heap_simulator_options)
.status());
return absl::OkStatus();
}
absl::Status MemorySpaceAssignment::Process(
const HloLiveRange& hlo_live_range) {
VLOG(1) << "Processing assigned buffers...";
absl::flat_hash_set<const Allocation*> needed_allocations;
if (options_.always_spill_to_default_memory) {
TransformAllocationSequenceToSpill(allocations_, hlo_live_range);
}
for (auto& allocation : allocations_) {
allocation->MarkIfNeeded(needed_allocations);
}
for (auto& allocation : allocations_) {
VLOG(3) << "Processing: " << allocation->ToString();
if (!needed_allocations.contains(allocation.get())) {
VLOG(3) << "Allocation not needed.";
continue;
}
TF_RETURN_IF_ERROR(allocation->Process());
if (allocation->is_scoped_allocation()) {
CHECK(allocation->memory_space() == MemorySpace::kAlternate);
scoped_memory_assignments_.emplace_back(
allocation->defining_position().instruction, allocation->chunk());
alternate_memory_size_ =
std::max(alternate_memory_size_, allocation->chunk().chunk_end());
} else if (allocation->memory_space() == MemorySpace::kAlternate) {
if (allocation->is_sliced_copy_allocation()) {
const SlicedCopyAllocation& sliced_copy_allocation =
*static_cast<const SlicedCopyAllocation*>(allocation.get());
for (const SlicedCopyAllocation::SliceDetail& details :
sliced_copy_allocation.slice_details_sorted_by_start_time()) {
alternate_memory_assignments_.push_back(
{{details.copy_done, {}}, details.slice_decision.chunk});
alternate_memory_size_ = std::max(
alternate_memory_size_, details.slice_decision.chunk.chunk_end());
}
CHECK(
!sliced_copy_allocation.cross_program_prefetch_index().has_value());
}
alternate_memory_assignments_.emplace_back(
allocation->defining_position(), allocation->chunk());
alternate_memory_size_ =
std::max(alternate_memory_size_, allocation->chunk().chunk_end());
if (allocation->cross_program_prefetch_index().has_value()) {
TF_RETURN_IF_ERROR(module_->SetCrossProgramPrefetchOffset(
*allocation->cross_program_prefetch_index(),
allocation->chunk().offset));
}
}
}
absl::flat_hash_set<HloPosition> seen_pinned_positions;
for (auto& allocation : allocations_) {
if (needed_allocations.contains(allocation.get())) {
VLOG(3) << "Post-Processing: " << allocation->ToString();
TF_RETURN_IF_ERROR(allocation->PostProcess());
if (allocation->is_pinned_allocation() &&
!allocation->is_scoped_allocation()) {
auto [it, inserted] =
seen_pinned_positions.insert(allocation->defining_position());
TF_RET_CHECK(inserted)
<< "Multiple pinned allocations defined for position "
<< allocation->defining_position().ToString();
}
}
}
return absl::OkStatus();
}
absl::Status MemorySpaceAssignment::ExportAndColorBuffers() {
VLOG(1) << "Exporting buffers...";
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module_));
absl::flat_hash_map<int64_t, int64_t> seen_buffer_offsets;
VLOG(3) << "Exported alternate memory allocations:";
for (const auto& position_and_chunk : alternate_memory_assignments_) {
const HloPosition& defining_position = position_and_chunk.first;
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
const HloBuffer& buffer = alias_analysis->GetUniqueBufferAt(
defining_position.instruction, defining_position.index);
auto seen_buffer_offset_it = seen_buffer_offsets.find(buffer.id());
if (seen_buffer_offset_it != seen_buffer_offsets.end()) {
CHECK_EQ(chunk.offset, seen_buffer_offset_it->second)
<< "Mismatch in offset for positions that map to the same value: "
<< buffer.ToString() << ", pos: " << defining_position.ToString();
} else {
VLOG(3) << " [" << chunk.offset << ", " << chunk.size
<< "] : " << defining_position.ToString() << " ("
<< buffer.ToString() << ")";
preset_assignments_->add_chunk(defining_position, chunk);
seen_buffer_offsets[buffer.id()] = chunk.offset;
}
}
VLOG(3) << "Exported scoped allocations in alternate memory:";
for (const auto& instruction_and_chunk : scoped_memory_assignments_) {
HloInstruction* instruction = instruction_and_chunk.first;
const HeapSimulator::Chunk& chunk = instruction_and_chunk.second;
VLOG(3) << " [" << chunk.offset << ", " << chunk.size
<< "] : " << instruction->name();
preset_assignments_->add_scoped_allocation_chunk(instruction, chunk);
}
if (!preset_assignments_->chunks().empty() ||
!preset_assignments_->scoped_allocation_chunks().empty()) {
preset_assignments_
->assignment_information_for_space(options_.alternate_memory_space)
->size = alternate_memory_size_;
}
VLOG(3) << "Exported alternate memory sizes:";
for (auto& pair : preset_assignments_->assignment_informations()) {
VLOG(3) << " space: " << pair.first << ", size: " << pair.second.size;
}
VLOG(1) << "Coloring buffers...";
for (const auto& defining_position_and_chunk :
preset_assignments_->chunks()) {
const HloPosition& defining_position = defining_position_and_chunk.first;
for (auto& buffer : alias_analysis->ComputeBuffersAt(
defining_position.instruction, defining_position.index)) {
for (auto& value : buffer->values()) {
for (auto& position : value->positions()) {
VLOG(4) << "Coloring " << position.ToString();
Shape* shape = ShapeUtil::GetMutableSubshape(
position.instruction->mutable_shape(), position.index);
CHECK(shape->IsArray()) << "Coloring a shape that is not an array: "
<< position.ToString();
shape->mutable_layout()->set_memory_space(
options_.alternate_memory_space);
}
}
}
}
return absl::OkStatus();
}
void MemorySpaceAssignment::RemoveAssignmentForInstruction(
const HloInstruction* instruction) {
auto it = alternate_memory_assignments_.begin();
auto end = alternate_memory_assignments_.end();
while (it != end) {
const HloPosition& position = it->first;
if (position.instruction == instruction) {
VLOG(3) << "Removing instruction from alternate memory assignments.";
if (std::next(it) == end) {
alternate_memory_assignments_.pop_back();
break;
} else {
*it = alternate_memory_assignments_.back();
alternate_memory_assignments_.pop_back();
end = alternate_memory_assignments_.end();
}
} else {
++it;
}
}
}
absl::Status MemorySpaceAssignment::SimplifyGraph() {
VLOG(1) << "Simplifying graph...";
for (HloComputation* computation : module_->MakeNonfusionComputations()) {
if (!computations_in_schedule_.contains(computation)) {
VLOG(4) << "Not simplifying " << computation->name()
<< " because it's not in the schedule.";
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
}
bool computation_modified = true;
while (computation_modified) {
computation_modified = false;
VLOG(4) << "Running simplify graph loop over " << computation->name();
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (computation->IsSafelyRemovable(instruction) &&
instruction->IsDead() && !instruction->HasSideEffect() &&
instruction->opcode() != HloOpcode::kCopyStart &&
instruction->opcode() != HloOpcode::kCopyDone) {
VLOG(4) << "Instruction removed: " << instruction->ToString();
RemoveAssignmentForInstruction(instruction);
auto instruction_it =
absl::c_find(flattened_instructions_, instruction);
if (instruction_it != flattened_instructions_.end()) {
*instruction_it = nullptr;
}
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
computation_modified = true;
} else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
HloInstruction* operand = instruction->mutable_operand(0);
if (operand->opcode() == HloOpcode::kTuple) {
HloInstruction* forwarded_instruction =
operand->mutable_operand(instruction->tuple_index());
VLOG(4) << "Replacing uses of " << instruction->ToString()
<< " with " << forwarded_instruction->ToString();
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(forwarded_instruction));
computation_modified = true;
}
} else if (instruction->opcode() == HloOpcode::kTuple) {
bool can_replace =
instruction->operand_count() > 0 &&
instruction->operand(0)->opcode() ==
HloOpcode::kGetTupleElement &&
instruction->operand(0)
->operand(0)
->shape()
.tuple_shapes_size() == instruction->operand_count();
for (int operand_number = 0;
operand_number < instruction->operand_count();
++operand_number) {
const HloInstruction* operand =
instruction->operand(operand_number);
if (operand->opcode() != HloOpcode::kGetTupleElement ||
operand->tuple_index() != operand_number ||
operand->operand(0) != instruction->operand(0)->operand(0)) {
can_replace = false;
break;
}
}
if (can_replace) {
HloInstruction* forwarded_instruction =
instruction->mutable_operand(0)->mutable_operand(0);
VLOG(4) << "Replacing uses of " << instruction->ToString()
<< " with " << forwarded_instruction->ToString();
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(forwarded_instruction));
computation_modified = true;
}
}
}
}
}
return absl::OkStatus();
}
namespace {
class AsyncCopyStep {
public:
struct StartPhase {
int64_t schedule_after_time;
HloInstruction* instruction;
};
struct DonePhase {
int64_t schedule_before_time;
HloInstruction* instruction;
};
virtual ~AsyncCopyStep() = default;
bool operator<(const AsyncCopyStep& rhs) const {
std::optional<StartPhase> lhs_start_phase = start_phase();
auto lhs_tuple = std::make_tuple(
done_phase().schedule_before_time,
(lhs_start_phase.has_value() ? lhs_start_phase->schedule_after_time
: done_phase().schedule_before_time));
std::optional<StartPhase> rhs_start_phase = rhs.start_phase();
auto rhs_tuple = std::make_tuple(
rhs.done_phase().schedule_before_time,
(rhs_start_phase.has_value() ? rhs_start_phase->schedule_after_time
: rhs.done_phase().schedule_before_time));
return lhs_tuple < rhs_tuple;
}
virtual HloPosition defining_position() const = 0;
virtual std::optional<StartPhase> start_phase() const = 0;
virtual void set_start_phase_schedule_after_time(int64_t schedule_after) = 0;
virtual DonePhase done_phase() const = 0;
protected:
AsyncCopyStep() = default;
};
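// Note: AsyncCopyStep::operator< orders steps by (done time, start time),
// where a step without a start phase (the slice-concat step below) falls back
// to its done time, so steps are sequenced primarily by when they must finish.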
class AsyncCopyStepForCopyAllocation : public AsyncCopyStep {
public:
explicit AsyncCopyStepForCopyAllocation(CopyAllocation* copy_allocation)
: AsyncCopyStep(), copy_allocation_(copy_allocation) {}
~AsyncCopyStepForCopyAllocation() override = default;
HloPosition defining_position() const override {
return copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
StartPhase phase{copy_allocation_->copy_start_schedule_after(),
copy_allocation_->copy_start()};
return phase;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {
copy_allocation_->set_copy_start_schedule_after(schedule_after);
}
DonePhase done_phase() const override {
return {copy_allocation_->copy_done_schedule_before(),
copy_allocation_->copy_done()};
}
private:
CopyAllocation* copy_allocation_ = nullptr;
};
class AsyncCopyStepForSlice : public AsyncCopyStep {
public:
AsyncCopyStepForSlice(SlicedCopyAllocation* sliced_copy_allocation,
size_t slice_index)
: AsyncCopyStep(),
sliced_copy_allocation_(sliced_copy_allocation),
slice_index_(slice_index) {}
~AsyncCopyStepForSlice() override = default;
HloPosition defining_position() const override {
return sliced_copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
const SlicedCopyAllocation::SliceDetail& slice_details =
sliced_copy_allocation_
->slice_details_sorted_by_start_time()[slice_index_];
StartPhase phase{slice_details.copy_start_after_time,
slice_details.copy_start};
return phase;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {
sliced_copy_allocation_
->mutable_slice_details_sorted_by_start_time()[slice_index_]
.copy_start_after_time = schedule_after;
}
DonePhase done_phase() const override {
const SlicedCopyAllocation::SliceDetail& slice_details =
sliced_copy_allocation_
->slice_details_sorted_by_start_time()[slice_index_];
DonePhase phase{slice_details.copy_done_before_time,
slice_details.copy_done};
return phase;
}
private:
SlicedCopyAllocation* sliced_copy_allocation_ = nullptr;
size_t slice_index_;
};
class AsyncCopyStepForSliceConcat : public AsyncCopyStep {
public:
explicit AsyncCopyStepForSliceConcat(
SlicedCopyAllocation* sliced_copy_allocation)
: AsyncCopyStep(), sliced_copy_allocation_(sliced_copy_allocation) {}
~AsyncCopyStepForSliceConcat() override = default;
HloPosition defining_position() const override {
return sliced_copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
return std::nullopt;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {}
DonePhase done_phase() const override {
return {sliced_copy_allocation_->earliest_available_time(),
sliced_copy_allocation_->concat()};
}
private:
SlicedCopyAllocation* sliced_copy_allocation_ = nullptr;
};
}
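// Collects an AsyncCopyStep for every copy and sliced-copy allocation in each
// memory space, sorts the steps by their done/start times, and records which
// copy-start/copy-done instructions to schedule after or before each
// instruction index.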
void MemorySpaceAssignment::ScheduleAsynchronousCopies() {
VLOG(1) << "Scheduling asynchronous copies...";
for (MemorySpace memory_space :
{MemorySpace::kDefault, MemorySpace::kAlternate}) {
std::vector<std::unique_ptr<AsyncCopyStep>> async_copy_steps;
for (auto& allocation : allocations_) {
if (allocation->memory_space() != memory_space) {
continue;
}
if (allocation->is_copy_allocation()) {
auto copy_allocation = static_cast<CopyAllocation*>(allocation.get());
async_copy_steps.push_back(
std::make_unique<AsyncCopyStepForCopyAllocation>(copy_allocation));
} else if (allocation->is_sliced_copy_allocation()) {
auto sliced_copy_allocation =
static_cast<SlicedCopyAllocation*>(allocation.get());
for (int i = 0; i < sliced_copy_allocation
->mutable_slice_details_sorted_by_start_time()
.size();
++i) {
async_copy_steps.push_back(std::make_unique<AsyncCopyStepForSlice>(
sliced_copy_allocation, i));
}
async_copy_steps.push_back(
std::make_unique<AsyncCopyStepForSliceConcat>(
sliced_copy_allocation));
}
}
absl::c_stable_sort(
async_copy_steps,
[](const std::unique_ptr<AsyncCopyStep>& lhs,
const std::unique_ptr<AsyncCopyStep>& rhs) { return *lhs < *rhs; });
for (std::unique_ptr<AsyncCopyStep>& async_copy_step : async_copy_steps) {
std::optional<AsyncCopyStep::StartPhase> start_phase =
async_copy_step->start_phase();
if (start_phase.has_value()) {
int64_t copy_start_schedule_after = start_phase->schedule_after_time;
while (
async_copy_step->defining_position().instruction->parent() !=
flattened_instructions_[
std::max<int64_t>(0, copy_start_schedule_after)]
->parent()) {
VLOG(4) << "Delaying CopyStart (" << copy_start_schedule_after
<< " to " << (copy_start_schedule_after + 1) << ") for "
<< start_phase->instruction->ToString()
<< " because it is not in the correct computation.";
async_copy_step->set_start_phase_schedule_after_time(
++copy_start_schedule_after);
}
start_phase = async_copy_step->start_phase();
schedule_after_[start_phase->schedule_after_time].push_back(
start_phase->instruction);
}
AsyncCopyStep::DonePhase done_phase = async_copy_step->done_phase();
schedule_before_[done_phase.schedule_before_time].push_back(
done_phase.instruction);
}
}
}
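// Rebuilds the instruction sequence of every scheduled non-fusion computation,
// interleaving the copy-start/copy-done instructions recorded in
// schedule_after_ and schedule_before_ with the original flattened schedule.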
absl::Status MemorySpaceAssignment::FixSchedule() {
VLOG(1) << "Fixing schedule...";
TF_RET_CHECK(module_->has_schedule());
HloSchedule& schedule = module_->schedule();
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
if (!computations_in_schedule_.contains(computation)) {
if (computation->IsAsyncComputation()) {
VLOG(4) << "Created a dummy schedule for async computation "
<< computation->name();
schedule.GetOrCreateSequence(computation);
continue;
}
VLOG(4) << "Not scheduling " << computation->name()
<< " because it's not in the schedule.";
continue;
}
TF_RET_CHECK(schedule.is_computation_scheduled(computation));
HloInstructionSequence new_sequence;
absl::flat_hash_set<HloInstruction*> inserted_instructions;
VLOG(4) << "Scheduling: " << computation->ToString();
for (int64_t instruction_index = -1;; ++instruction_index) {
auto insts_before_iter = schedule_before_.find(instruction_index);
if (insts_before_iter != schedule_before_.end()) {
for (HloInstruction* new_instruction : insts_before_iter->second) {
if (new_instruction->parent() == computation) {
VLOG(4) << "before " << instruction_index << ": "
<< new_instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
new_instruction, &new_sequence, &inserted_instructions));
}
}
}
if (instruction_index != -1) {
if (instruction_index >= flattened_instructions_.size()) {
break;
}
HloInstruction* instruction =
flattened_instructions_[instruction_index];
if (instruction != nullptr && instruction->parent() == computation &&
instruction->opcode() != HloOpcode::kBitcast &&
instruction->opcode() != HloOpcode::kTuple &&
!inserted_instructions.contains(instruction)) {
VLOG(4) << "inst " << instruction_index << ": "
<< instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
instruction, &new_sequence, &inserted_instructions));
}
}
auto insts_after_iter = schedule_after_.find(instruction_index);
if (insts_after_iter != schedule_after_.end()) {
for (HloInstruction* new_instruction : insts_after_iter->second) {
if (new_instruction->parent() == computation) {
VLOG(4) << "after " << instruction_index << ": "
<< new_instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
new_instruction, &new_sequence, &inserted_instructions));
}
}
}
}
TF_RETURN_IF_ERROR(EnsureInstructionAndOperandsInserted(
computation->root_instruction(), &new_sequence,
&inserted_instructions));
CHECK_EQ(new_sequence.size(), computation->instruction_count())
<< "New sequence for computation " << computation->name() << " has "
<< new_sequence.size() << " instructions, expects "
<< computation->instruction_count() << ".";
schedule.set_sequence(computation, new_sequence);
}
TF_RETURN_IF_ERROR(schedule.Update());
return absl::OkStatus();
}
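// Verifies that the preset chunk assignments do not overlap in time and space
// and exports the resulting allocation/free events as a heap simulator trace
// for the alternate memory space.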
absl::Status MemorySpaceAssignment::VerifyAndExportHeapSimulatorTrace() {
VLOG(1) << "Verifying...";
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module_));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *alias_analysis,
module_->entry_computation()));
BufferIntervalTree interval_tree;
absl::flat_hash_set<int64_t> seen_buffers;
std::map<std::tuple<int64_t, bool, int64_t>,
std::tuple<const HloValue*, HeapSimulator::Chunk,
HeapSimulatorTrace::Event::Kind>>
events;
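  // Records an ALLOC event at start_time and a FREE event at end_time for the
  // value, and checks that its chunk does not overlap any chunk that is live
  // in the same time interval before adding it to the interval tree.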
auto add_allocation_and_verify = [&](int64_t start_time, int64_t end_time,
const HeapSimulator::Chunk& chunk,
const HloValue* value) -> absl::Status {
events[std::make_tuple(start_time, false, value->id())] =
std::make_tuple(value, chunk, HeapSimulatorTrace::Event::ALLOC);
events[std::make_tuple(end_time, true, value->id())] =
std::make_tuple(value, chunk, HeapSimulatorTrace::Event::FREE);
for (const HeapSimulator::Chunk& overlapping_chunk :
interval_tree.ChunksOverlappingInTime(start_time, end_time - 1)) {
if (chunk.OverlapsWith(overlapping_chunk)) {
return Internal(
("Value %s (%d, %d) off: %d size: %d overlaps with another chunk"
" off: %d size: %d"),
value->ToShortString(), start_time, end_time, chunk.offset,
chunk.size, overlapping_chunk.offset, overlapping_chunk.size);
}
}
interval_tree.Add(start_time, end_time - 1, chunk);
return absl::OkStatus();
};
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart) {
int64_t from_memory_space =
ShapeUtil::GetSubshape(instruction->shape(), {1})
.layout()
.memory_space();
int64_t to_memory_space =
ShapeUtil::GetSubshape(instruction->shape(), {0})
.layout()
.memory_space();
CHECK_NE(from_memory_space, to_memory_space)
<< "Asynchronous copy to the same memory space: "
<< instruction->ToString();
}
}
}
for (const auto& position_and_chunk : preset_assignments_->chunks()) {
const HloPosition& position = position_and_chunk.first;
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
const HloBuffer& buffer =
alias_analysis->GetUniqueBufferAt(position.instruction, position.index);
CHECK(!seen_buffers.contains(buffer.id()))
<< "Multiple preset assignments for the same buffer: "
<< buffer.ToString() << ", pos: " << position.ToString()
<< ", off: " << chunk.offset << ", size: " << chunk.size;
seen_buffers.insert(buffer.id());
for (const HloValue* value : buffer.values()) {
const HloLiveRange::TimeBound& time_bound =
hlo_live_range->buffer_live_ranges().at(value);
const HloInstruction* last_use_instruction = nullptr;
int64_t last_use_time = time_bound.start;
for (const HloUse& use : value->GetUses()) {
int64_t use_time =
hlo_live_range->instruction_schedule().at(use.instruction);
if (use_time > last_use_time) {
last_use_time = use_time;
last_use_instruction = use.instruction;
}
}
std::function<absl::Status(const HloInstruction*, int64_t, int64_t,
absl::string_view)>
split_conditional_buffer;
split_conditional_buffer = [&](const HloInstruction* use_instruction,
int64_t start_time, int64_t end_time,
absl::string_view indent_string) {
VLOG(3) << indent_string
<< "Splitting conditional buffer: " << buffer.ToString()
<< " value: " << value->ToShortString() << ": (" << start_time
<< ", " << end_time << ") off: " << chunk.offset
<< ", size: " << chunk.size;
int64_t earliest_computation_start_time = end_time;
for (const HloComputation* called_computation :
use_instruction->called_computations()) {
int64_t computation_start_time =
hlo_live_range->computation_span_times()
.at(called_computation)
.start;
earliest_computation_start_time =
std::min(earliest_computation_start_time, computation_start_time);
int64_t last_use_time = -1;
const HloInstruction* last_use_instruction = nullptr;
for (const HloUse& use : value->GetUses()) {
int64_t use_time =
hlo_live_range->instruction_schedule().at(use.instruction);
if (use.instruction->parent() == called_computation &&
use_time > last_use_time) {
last_use_time = use_time;
last_use_instruction = use.instruction;
}
}
if (last_use_time != -1) {
VLOG(3) << indent_string
<< " computation: " << called_computation->name() << ": ("
<< computation_start_time << ", " << last_use_time << ")";
CHECK(last_use_instruction);
last_use_time = std::min(last_use_time, end_time);
if (last_use_instruction->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(split_conditional_buffer(
last_use_instruction, computation_start_time, last_use_time,
absl::StrCat(indent_string, " ")));
} else {
TF_RETURN_IF_ERROR(add_allocation_and_verify(
computation_start_time, last_use_time, chunk, value));
}
}
}
VLOG(3) << indent_string << " from beginning until first computation: ("
<< start_time << ", " << (earliest_computation_start_time - 1)
<< ")";
TF_RETURN_IF_ERROR(add_allocation_and_verify(
start_time, earliest_computation_start_time - 1, chunk, value));
return absl::OkStatus();
};
if (last_use_instruction &&
last_use_instruction->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(split_conditional_buffer(
last_use_instruction, time_bound.start, time_bound.end, " "));
} else if (!value->GetUses().empty()) {
last_use_time = std::min(last_use_time, time_bound.end);
VLOG(3) << " buffer: " << buffer.ToString()
<< " value: " << value->ToShortString() << ": ("
<< time_bound.start << ", " << last_use_time
<< ") off: " << chunk.offset << ", size: " << chunk.size;
TF_RETURN_IF_ERROR(add_allocation_and_verify(
time_bound.start, last_use_time, chunk, value));
}
}
}
HeapSimulatorTrace* heap_trace =
&preset_assignments_
->assignment_information_for_space(options_.alternate_memory_space)
->heap_simulator_trace;
int64_t memory_usage = 0;
int64_t max_memory_usage = 0;
int64_t prev_time = 0;
int64_t prev_memory_usage = 0;
for (const auto& event : events) {
int64_t time;
bool is_free;
int64_t buffer_id;
std::tie(time, is_free, buffer_id) = event.first;
const HloValue* value;
HeapSimulator::Chunk chunk;
HeapSimulatorTrace::Event::Kind kind;
std::tie(value, chunk, kind) = event.second;
HeapSimulatorTrace::Event* heap_trace_event = heap_trace->add_events();
heap_trace_event->set_kind(kind);
heap_trace_event->set_buffer_id(buffer_id);
*heap_trace_event->mutable_instruction_name() =
std::string(value->instruction()->name());
*heap_trace_event->mutable_computation_name() =
std::string(value->instruction()->parent()->name());
if (prev_time != time) {
VLOG(2) << "Memory usage: " << std::max(memory_usage, prev_memory_usage)
<< " at time: " << prev_time << " ("
<< hlo_live_range->flattened_instruction_sequence()
.instructions()
.at(prev_time)
->name()
<< ")";
prev_time = time;
prev_memory_usage = memory_usage;
}
if (kind == HeapSimulatorTrace::Event::ALLOC) {
memory_usage += chunk.size;
} else {
CHECK_EQ(kind, HeapSimulatorTrace::Event::FREE);
memory_usage -= chunk.size;
}
prev_memory_usage = std::max(prev_memory_usage, memory_usage);
max_memory_usage = std::max(max_memory_usage, memory_usage);
VLOG(4) << "Memory usage: " << memory_usage << " at time: " << time;
}
VLOG(1) << "Max memory usage ignoring fragmentation: " << max_memory_usage;
return absl::OkStatus();
}
}
}
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/instruction_hoister.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
namespace op = xla::testing::opcode_matchers;
using Chunk = HeapSimulator::Chunk;
using ::testing::_;
using ::testing::Return;
using ::testing::UnorderedElementsAre;
constexpr int64_t kPointerSize = 8;
constexpr float kAsyncCopyBandwidth = 100;
constexpr float kAlternateMemBandwidth = 1000;
constexpr float kBytesPerSecond = 100;
constexpr float kFlopsPerSecond = 1000;
constexpr float kTranscendentalsPerSecond = 10;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
int64_t SizeFunction(const BufferValue& value) {
return ShapeSize(value.shape());
}
int64_t ReservedScopedMemoryFn(
const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
return 0;
}
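// Wraps an MsaBufferIntervalCompare function in the BufferIntervalComparator
// interface so tests can inject a custom buffer ordering.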
class TestBufferIntervalComparator : public BufferIntervalComparator {
public:
explicit TestBufferIntervalComparator(MsaBufferIntervalCompare compare_method)
: BufferIntervalComparator(), compare_method_(compare_method) {}
~TestBufferIntervalComparator() override = default;
std::string DescribeComparisonCriteria() const override {
return "internal to test";
}
std::string CriteriaToString(
const MsaBufferInterval& buffer_interval) override {
return "internal to test";
}
bool LessThan(const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) override {
return compare_method_(lhs, rhs);
}
private:
MsaBufferIntervalCompare compare_method_;
};
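// Common test fixture: provides default memory space assignment and cost
// analysis options, helpers that run memory space assignment on a module, and
// checks on the resulting assignments (parameters and root in default memory,
// exported positions in alternate memory).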
class MemorySpaceAssignmentTestBase : public HloTestBase {
protected:
const int64_t kDefaultMemorySpace = 0;
const int64_t kAlternateMemorySpace = 1;
HloCostAnalysis::Options DefaultHloCostAnalysisOptions() {
HloCostAnalysis::Options options;
options.shape_size = ShapeSize;
options.set_flops_per_second(kFlopsPerSecond);
options.set_bytes_per_second(kBytesPerSecond);
options.set_transcendentals_per_second(kTranscendentalsPerSecond);
return options;
}
Options DefaultMemorySpaceOptions() {
Options options;
options.max_size_in_bytes = 128;
options.alignment_in_bytes = 8;
options.verify = true;
options.alternate_memory_space = kAlternateMemorySpace;
options.max_outstanding_prefetches = -1;
options.max_outstanding_evictions = -1;
return options;
}
CostAnalysisOptions DefaultCostAnalysisOptions() {
CostAnalysisOptions options;
options.async_copy_bandwidth_bytes_per_second = kAsyncCopyBandwidth;
options.alternate_mem_bandwidth_bytes_per_second = kAlternateMemBandwidth;
return options;
}
Options UpdateMaxAsyncCopies(Options options, int64_t max_async_copies) {
options.max_outstanding_prefetches = max_async_copies;
options.max_outstanding_evictions = max_async_copies;
return options;
}
std::unique_ptr<PresetAssignments> AssignMemorySpaceUsingCostAnalysis(
HloModule* module,
std::optional<Options> memory_space_options_override = std::nullopt,
std::optional<CostAnalysisOptions> cost_analysis_options_override =
std::nullopt,
std::optional<HloCostAnalysis::Options> hlo_cost_options_override =
std::nullopt,
std::optional<MsaSortOrderOverrides> optional_msa_sort_order_overrides =
std::nullopt) {
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
if (hlo_cost_options_override) {
hlo_cost_options = *hlo_cost_options_override;
}
HloCostAnalysis hlo_cost_analysis(hlo_cost_options);
for (HloComputation* computation : module->MakeNonfusionComputations()) {
TF_CHECK_OK(computation->Accept(&hlo_cost_analysis));
}
auto alias_analysis = HloAliasAnalysis::Run(module).value();
Options memory_space_options = DefaultMemorySpaceOptions();
if (memory_space_options_override) {
memory_space_options = *memory_space_options_override;
}
CostAnalysisOptions cost_analysis_options = DefaultCostAnalysisOptions();
if (cost_analysis_options_override) {
cost_analysis_options = *cost_analysis_options_override;
}
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
auto cost_analysis = CostAnalysis::Create(hlo_cost_analysis_costs,
cost_analysis_options, *module)
.value();
memory_space_options.cost_analysis = cost_analysis.get();
CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
CostAnalysisPrefetchIntervalPicker(
*cost_analysis, 0.8,
1.5,
10.0,
memory_space_options.max_size_in_bytes));
MsaSortOrderOverrides msa_sort_order_overrides;
if (optional_msa_sort_order_overrides.has_value()) {
msa_sort_order_overrides = optional_msa_sort_order_overrides.value();
}
MemoryBoundednessBufferIntervalComparator comparator(
*cost_analysis, &cache_, msa_sort_order_overrides);
return AssignMemorySpace(
module, memory_space_options,
[&comparator](const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) {
return comparator.LessThan(lhs, rhs);
},
&prefetch_interval_picker);
}
std::unique_ptr<PresetAssignments> AssignMemorySpace(
HloModule* module, std::optional<Options> options_override = std::nullopt,
int64_t max_prefetch_interval = 10, int64_t min_prefetch_interval = 2) {
InstructionHoister instruction_hoister;
TF_CHECK_OK(instruction_hoister.Run(module).status());
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(
min_prefetch_interval, max_prefetch_interval);
return AssignMemorySpace(module, options_override,
{},
&prefetch_interval_picker);
}
std::unique_ptr<PresetAssignments> AssignMemorySpace(
HloModule* module, std::optional<Options> options_override,
std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
PrefetchIntervalPicker* prefetch_interval_picker) {
auto status_or = AssignMemorySpaceAndReturnStatus(module, options_override,
buffer_interval_compare,
prefetch_interval_picker);
TF_EXPECT_OK(status_or.status());
return std::move(status_or.value());
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
AssignMemorySpaceAndReturnStatus(
HloModule* module, std::optional<Options> options_override,
std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
PrefetchIntervalPicker* prefetch_interval_picker) {
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
auto is_allowed_in_alternate_mem = [](const HloValue& value) {
HloInstruction* instruction = value.instruction();
HloComputation* computation = instruction->parent();
bool in_entry_computation =
(computation == computation->parent()->entry_computation());
if (in_entry_computation &&
instruction->opcode() == HloOpcode::kParameter) {
return false;
}
return true;
};
bool check_parameters_in_default_memory = true;
for (const HloInstruction* parameter :
module->entry_computation()->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
parameter->shape(),
[&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.has_layout() &&
subshape.layout().memory_space() == kAlternateMemorySpace) {
check_parameters_in_default_memory = false;
}
});
}
Options options = DefaultMemorySpaceOptions();
if (options_override) {
options = *options_override;
}
std::unique_ptr<TestBufferIntervalComparator> test_comparator;
if (buffer_interval_compare.has_value()) {
test_comparator = std::make_unique<TestBufferIntervalComparator>(
*buffer_interval_compare);
options.buffer_interval_comparator = test_comparator.get();
}
options.prefetch_interval_picker = prefetch_interval_picker;
options.size_fn = size_fn;
if (options.is_allowed_in_alternate_mem_fn == nullptr) {
options.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
}
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
TF_ASSIGN_OR_RETURN(std::unique_ptr<PresetAssignments> preset_assignments,
MemorySpaceAssignment::Run(module, *hlo_live_range,
*alias_analysis, options));
if (check_parameters_in_default_memory) {
CheckParametersInDefaultMemory(module);
}
CheckRootInDefaultMemory(module);
CheckPresetAssignments(preset_assignments.get());
return preset_assignments;
}
void CheckPresetAssignments(const PresetAssignments* preset_assignments) {
std::set<HloPosition> positions_in_preset_assignments;
for (auto& position_and_chunk : preset_assignments->chunks()) {
HloPosition position = position_and_chunk.first;
EXPECT_EQ(positions_in_preset_assignments.find(position),
positions_in_preset_assignments.end());
positions_in_preset_assignments.insert(position);
const Shape& subshape =
ShapeUtil::GetSubshape(position.instruction->shape(), position.index);
EXPECT_EQ(subshape.layout().memory_space(), kAlternateMemorySpace)
<< "Exported position is not in alternate mem: "
<< position.ToString();
}
}
void CheckParametersInDefaultMemory(const HloModule* module) {
const HloComputation* entry_computation = module->entry_computation();
for (const HloInstruction* parameter :
entry_computation->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
parameter->shape(),
[&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.has_layout()) {
EXPECT_NE(subshape.layout().memory_space(), kAlternateMemorySpace)
<< "Parameter not in default memory: "
<< parameter->ToString();
}
});
}
}
void CheckRootInDefaultMemory(const HloModule* module) {
const HloInstruction* root =
module->entry_computation()->root_instruction();
if (root->shape().IsArray()) {
EXPECT_EQ(root->shape().layout().memory_space(), kDefaultMemorySpace);
}
}
struct OutstandingAsyncCopies {
int64_t max_copies;
int64_t max_prefetches;
int64_t max_evictions;
};
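// Walks the entry computation's schedule and tracks the peak number of
// in-flight copy-start/copy-done pairs, split into prefetches (to alternate
// memory) and evictions (to default memory).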
OutstandingAsyncCopies CountMaximumOutstandingAsyncCopies(
const HloModule& module) {
OutstandingAsyncCopies copies{0, 0, 0};
int64_t current_copies = 0;
int64_t current_prefetches = 0;
int64_t current_evictions = 0;
for (HloInstruction* instruction : module.schedule()
.sequence(module.entry_computation())
.instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart) {
current_copies++;
if (ShapeUtil::GetSubshape(instruction->shape(), {0})
.layout()
.memory_space() == kAlternateMemorySpace) {
current_prefetches++;
} else {
current_evictions++;
}
} else if (instruction->opcode() == HloOpcode::kCopyDone) {
current_copies--;
if (instruction->shape().layout().memory_space() ==
kAlternateMemorySpace) {
current_prefetches--;
} else {
current_evictions--;
}
}
copies.max_copies = std::max(copies.max_copies, current_copies);
copies.max_prefetches =
std::max(copies.max_prefetches, current_prefetches);
copies.max_evictions = std::max(copies.max_evictions, current_evictions);
}
return copies;
}
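// Returns the alternate-memory offset assigned to the buffer defined at the
// given instruction and shape index, or -1 if no chunk was assigned to it.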
int64_t GetAlternateMemoryOffset(const PresetAssignments& preset_assignments,
const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
const HloModule* module = instruction->GetModule();
auto alias_analysis = HloAliasAnalysis::Run(module).value();
HloBuffer& buffer = alias_analysis->GetUniqueBufferAt(instruction, index);
for (auto& pos_and_chunk : preset_assignments.chunks()) {
for (auto& value : buffer.values()) {
if (pos_and_chunk.first == value->defining_position()) {
return pos_and_chunk.second.offset;
}
}
}
return -1;
}
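// Builds a scheduled module with enough simultaneously live values that memory
// space assignment is intended to evict the tanh result to default memory and
// prefetch it back for its final use.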
std::unique_ptr<HloModule> CreateEvictAndPrefetchModule() {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, tanh, a, b, c, d, e, f, g, h, i,
j, k, l, m, n, o, add});
TF_CHECK_OK(module->set_schedule(schedule));
return module;
}
CostAnalysis::Cache cache_;
};
using MemorySpaceAssignmentTest = MemorySpaceAssignmentTestBase;
TEST_F(MemorySpaceAssignmentTest, ParameterOnly) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
}
TEST_F(MemorySpaceAssignmentTest, Simple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
HloInstruction* sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, sub));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, add, sub, mul});
TF_CHECK_OK(module->set_schedule(schedule));
auto preset_assignments = AssignMemorySpace(module.get());
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
EXPECT_THAT(mul, op::ShapeWithLayout(shape));
EXPECT_THAT(add, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(sub, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_EQ(preset_assignments->chunks().size(), 3);
EXPECT_EQ(preset_assignments->assignment_informations().size(), 1);
EXPECT_NE(preset_assignments->chunks()[0].second.offset,
preset_assignments->chunks()[1].second.offset);
}
TEST_F(MemorySpaceAssignmentTest, NegateChain) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[2], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest,
SyncCopyReplacementRedundantCopyAfterPrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
ROOT add0 = f32[2,3]{1,0} add(p0_copy, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* add0 = FindInstruction(module.get(), "add0");
ASSERT_NE(add0, nullptr);
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(p0, nullptr);
EXPECT_THAT(add0->operand(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace, p0));
}
TEST_F(MemorySpaceAssignmentTest,
SyncCopyReplacementWouldNeedMoreThanOneAsyncCopy) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
ROOT tuple0 = tuple(negate7, p0, p0_copy)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* tuple0 = FindInstruction(module.get(), "tuple0");
ASSERT_NE(tuple0->operand(1), tuple0->operand(2));
}
TEST_F(MemorySpaceAssignmentTest, SyncCopyReplacementOperandHasMultipleUses) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
add0 = add(p0_copy, p0)
ROOT tuple = tuple(negate7, add0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* add0 = FindInstruction(module.get(), "add0");
ASSERT_EQ(add0->operand(0), add0->operand(1));
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillJitPrefetchTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p0)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
ROOT add = f32[2,3]{1,0} add(negate6, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* add = FindInstruction(module.get(), "add");
const HloInstruction* cd = add->operand(1);
EXPECT_THAT(cd, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add),
live_range->instruction_schedule().at(cd) + 1);
const HloInstruction* cs = cd->operand(0);
EXPECT_THAT(cs, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add),
live_range->instruction_schedule().at(cs) + 2);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillPrefetchForSecondUseTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p0)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
add0 = f32[2,3]{1,0} add(negate5, negate0)
ROOT add1 = f32[2,3]{1,0} add(add0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* add1 = FindInstruction(module.get(), "add1");
const HloInstruction* cd1 = add1->operand(1);
EXPECT_THAT(cd1, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cd1) + 1);
const HloInstruction* cs1 = cd1->operand(0);
EXPECT_THAT(cs1, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cs1) + 2);
EXPECT_EQ(cd1->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* add0 = FindInstruction(module.get(), "add0");
const HloInstruction* cd0 = add0->operand(1);
EXPECT_THAT(cd0, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add0),
live_range->instruction_schedule().at(cd0) + 1);
const HloInstruction* cs0 = cd0->operand(0);
EXPECT_THAT(cs0, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add0),
live_range->instruction_schedule().at(cs0) + 2);
EXPECT_EQ(cd0->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* eviction_done = cs0->operand(0);
EXPECT_EQ(eviction_done->shape().layout().memory_space(),
kDefaultMemorySpace);
const HloInstruction* eviction_start = eviction_done->operand(0);
const HloInstruction* negate0 = eviction_start->operand(0);
EXPECT_EQ(live_range->instruction_schedule().at(eviction_start),
live_range->instruction_schedule().at(negate0) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(eviction_done),
live_range->instruction_schedule().at(negate0) + 2);
EXPECT_EQ(negate0->name(), "negate0");
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillEvictionTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[4,3]{1,0} parameter(0)
tanh0 = f32[4,3]{1,0} tanh(p0)
add0 = f32[4,3]{1,0} add(p0, p0)
add1 = f32[4,3]{1,0} add(add0, p0)
add2 = f32[4,3]{1,0} add(add1, p0)
add3 = f32[4,3]{1,0} add(add2, p0)
add4 = f32[4,3]{1,0} add(add3, p0)
add5 = f32[4,3]{1,0} add(add4, tanh0)
negate0 = f32[4,3]{1,0} negate(add5)
tanh1 = f32[4,3]{1,0} tanh(negate0)
negate1 = f32[4,3]{1,0} negate(negate0)
tanh2 = f32[4,3]{1,0} tanh(tanh1)
negate2 = f32[4,3]{1,0} negate(negate1)
ROOT tuple = tuple(tanh0, tanh2, negate2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* tuple = FindInstruction(module.get(), "tuple");
const HloInstruction* tanh0_eviction_done = tuple->operand(0);
const HloInstruction* tanh0_eviction_start = tanh0_eviction_done->operand(0);
const HloInstruction* tanh0 = tanh0_eviction_start->operand(0);
EXPECT_EQ(tanh0->name(), "tanh0");
EXPECT_EQ(tanh0_eviction_done->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_EQ(live_range->instruction_schedule().at(tanh0_eviction_start),
live_range->instruction_schedule().at(tanh0) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(tanh0_eviction_done),
live_range->instruction_schedule().at(tanh0) + 2);
const HloInstruction* add5 = FindInstruction(module.get(), "add5");
const HloInstruction* tanh0_prefetch_done = add5->operand(1);
const HloInstruction* tanh0_prefetch_start = tanh0_prefetch_done->operand(0);
EXPECT_EQ(tanh0_prefetch_done->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(live_range->instruction_schedule().at(add5),
live_range->instruction_schedule().at(tanh0_prefetch_done) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(add5),
live_range->instruction_schedule().at(tanh0_prefetch_start) + 2);
EXPECT_EQ(tanh0_eviction_done, tanh0_prefetch_start->operand(0));
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdatePreferredPrefetchTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[6], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchBeforeTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { before_instruction_name: "negate.3" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[5], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchAfterTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[4], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchTooLateTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.5" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::Parameter(1)));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
{1, 0},
{},
1,
0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigPrecedenceTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
}
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[6], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
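// Same two overrides as the previous test but listed in the opposite order:
// the instruction-name filter now comes first and therefore determines the
// prefetch start position (right after negate.1).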
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchPrecedenceTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
}
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[4], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
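// An override whose filter can never match (size_lte: 24 together with
// size_gte: 25) is ignored, so the prefetch falls back to the default
// placement right after the parameters.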
TEST_F(MemorySpaceAssignmentTest, FilterUpdatePreferredPrefetchNoMatchTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 25 }
override_options { prefetch_eagerness: 0.5 }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[2], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
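// The tanh value is evicted to default memory and later prefetched back into
// alternate memory for its use in the final add.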
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetch) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Add(op::Add(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
}
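// The next three tests verify that capping the number of outstanding async
// copies at N also bounds the observed maximum concurrent prefetches and
// evictions by N.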
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies0) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 0));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 0);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 0);
}
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies1) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 1));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 1);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 1);
}
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies2) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 2));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 2);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 2);
}
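// Disabled: per its name, this test is meant to check that a value which
// already has a default-memory allocation is not evicted again when async
// copies are limited to one.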
TEST_F(MemorySpaceAssignmentTest,
DISABLED_DontEvictWhenThereIsDefaultMemAllocation) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, tanh, a, b, c, d, e, f, g, h, i,
j, k, l, m, n, o, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 1));
EXPECT_THAT(f, op::Multiply(op::Add(), op::CopyDone()));
EXPECT_THAT(h, op::Multiply(op::Subtract(), op::Multiply()));
}
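// The tanh value is evicted once and then prefetched back twice, once for
// each of its two late uses (add0 and add1).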
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchAndPrefetch) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, add0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate7));
HloInstruction* negate9 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate8));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate9, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, p1, tanh, a, b, c, d, e,
f, g, h, i, j, k, l, m,
n, o, add0, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, negate7, negate8, negate9, add1});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
add0,
op::Add(op::Add(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
EXPECT_THAT(
add1,
op::Add(op::Negate(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
}
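// A simple while loop; verifies that the multiply inside the loop body
// produces its result in the alternate memory space.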
TEST_F(MemorySpaceAssignmentTest, While) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
  HloInstruction* data = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param_data"));
  HloInstruction* iter = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
TF_CHECK_OK(module->set_schedule(schedule));
LOG(INFO) << module->ToString(HloPrintOptions::ShortParsable());
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(body_data_mul, op::ShapeWithLayout(shape_in_alternate_mem));
}
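// Tuple-shaped parameter with a nested tuple; the elements used by the add
// and the multiply are prefetched from default into alternate memory.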
TEST_F(MemorySpaceAssignmentTest, Tuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape inner_tuple_shape = ShapeUtil::MakeTupleShape({shape});
Shape tuple_shape =
ShapeUtil::MakeTupleShape({shape, shape, inner_tuple_shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
HloInstruction* p2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(inner_tuple_shape, p, 2));
HloInstruction* p2_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p2, 0));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, p2_0));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1, add, p2, p2_0, mul});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul,
op::Multiply(op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::GetTupleElement())),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::GetTupleElement()))));
}
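// A bitcast between a negate and the add; the bitcast of the negate result
// should end up with the alternate memory space.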
TEST_F(MemorySpaceAssignmentTest, Bitcast) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(param_shape, negate));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(param_shape, HloOpcode::kAdd, bitcast, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate, bitcast, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
bitcast = add->mutable_operand(0);
EXPECT_EQ(bitcast->opcode(), HloOpcode::kBitcast);
EXPECT_EQ(bitcast->shape().layout().memory_space(), kAlternateMemorySpace);
}
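// The bitcast of parameter p1 is used only by the add at the end of a long
// negate chain; the bitcasted value should be prefetched into alternate
// memory for that use.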
TEST_F(MemorySpaceAssignmentTest, Bitcast2) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, bitcast, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
}
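// Chains of bitcasts with different shapes over the same parameter; the
// parameter is prefetched and the alternate memory space is propagated
// through every bitcast use.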
TEST_F(MemorySpaceAssignmentTest, Bitcast3) {
HloComputation::Builder builder(TestName());
Shape shape1 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape3 = ShapeUtil::MakeShape(F32, {1, 6});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape1, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate3));
HloInstruction* bitcast1 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape1, p1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kAdd, bitcast1, negate4));
HloInstruction* bitcast2 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape3, p1));
HloInstruction* bitcast3 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape2, bitcast2));
HloInstruction* bitcast4 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape2, add));
HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
shape2, HloOpcode::kMultiply, bitcast3, bitcast4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
bitcast1, add, bitcast2, bitcast3, bitcast4, mul});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul,
op::Multiply(
op::Bitcast(op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))),
op::Bitcast(op::Add(
op::Bitcast(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1))),
op::Negate()))));
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(add->shape().layout().memory_space(), kAlternateMemorySpace);
EXPECT_EQ(mul->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(mul->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
}
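// A bitcast feeding a tuple-shaped fusion operand; only checks that memory
// space assignment completes without errors.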
TEST_F(MemorySpaceAssignmentTest, BitcastTuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bitcast, p0}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
bitcast, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
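// Bitcasts of get-tuple-elements feeding nested tuples; only checks that
// memory space assignment completes without errors.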
TEST_F(MemorySpaceAssignmentTest, BitcastGetTupleElementTuple) {
absl::string_view hlo_string = R"(
HloModule DoIt_S64_10_0_5_1.3, is_scheduled=true
ENTRY %DoIt_S64_10_0_5_1.3 (p0.1: (u32[10], u32[10])) -> (u32[5], u32[5]) {
%p0.1 = (u32[10]{0:T(128)}, u32[10]{0:T(128)}) parameter(0)
%get-tuple-element.1 = u32[10]{0:T(128)} get-tuple-element((u32[10]{0:T(128)}, u32[10]{0:T(128)}) %p0.1), index=1
%bitcast.1 = u32[5]{0:T(128)} bitcast(u32[10]{0:T(128)} %get-tuple-element.1)
%get-tuple-element = u32[10]{0:T(128)} get-tuple-element((u32[10]{0:T(128)}, u32[10]{0:T(128)}) %p0.1), index=0
%bitcast = u32[5]{0:T(128)} bitcast(u32[10]{0:T(128)} %get-tuple-element)
%tuple.1 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) tuple(u32[5]{0:T(128)} %bitcast, u32[5]{0:T(128)} %bitcast.1)
%tuple.3 = ((u32[5]{0:T(128)}, u32[5]{0:T(128)}), (u32[5]{0:T(128)}, u32[5]{0:T(128)})) tuple(%tuple.1, %tuple.1)
%get-tuple-element.4 = u32[5]{0:T(128)} get-tuple-element((u32[5]{0:T(128)}, u32[5]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) get-tuple-element(%tuple.3), index=0
%get-tuple-element.6 = u32[5]{0:T(128)} get-tuple-element((u32[5]{0:T(128)}, u32[5]{0:T(128)}) %get-tuple-element.5), index=1
%copy.2 = u32[5]{0:T(128)} copy(u32[5]{0:T(128)} %get-tuple-element.4)
%copy.3 = u32[5]{0:T(128)} copy(u32[5]{0:T(128)} %get-tuple-element.6)
ROOT %tuple.2 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) tuple(u32[5]{0:T(128)} %copy.2, u32[5]{0:T(128)} %copy.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
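// Per its name, a regression test for a bug in operand simplification; only
// checks that memory space assignment completes without errors.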
TEST_F(MemorySpaceAssignmentTest, GetSimplifiedOperandBug) {
absl::string_view hlo_string = R"(
HloModule sort.16, is_scheduled=true
ENTRY %sort.16 (param.0.1: s32[1], param.1.2: f32[1], param.2.3: u32[1], param.3.4: s32[1]) -> (s32[1], f32[1], u32[1], s32[1]) {
%param.3.4 = s32[1]{0:T(128)} parameter(3)
%param.2.3 = u32[1]{0:T(128)} parameter(2)
%param.1.2 = f32[1]{0:T(128)} parameter(1)
%param.0.1 = s32[1]{0:T(128)} parameter(0)
%tuple.1 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %param.0.1, f32[1]{0:T(128)} %param.1.2, u32[1]{0:T(128)} %param.2.3, s32[1]{0:T(128)} %param.3.4)
%get-tuple-element.4 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = f32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=1
%get-tuple-element.6 = u32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=2
%get-tuple-element.7 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=3
%copy.4 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.4)
%copy.5 = f32[1]{0:T(128)} copy(f32[1]{0:T(128)} %get-tuple-element.5)
%copy.6 = u32[1]{0:T(128)} copy(u32[1]{0:T(128)} %get-tuple-element.6)
%copy.7 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.7)
ROOT %tuple.2 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %copy.4, f32[1]{0:T(128)} %copy.5, u32[1]{0:T(128)} %copy.6, s32[1]{0:T(128)} %copy.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
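// The bitcast has an early use (negate0) and a late use (add); only the late
// use should see the prefetched, alternate-memory copy.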
TEST_F(MemorySpaceAssignmentTest, BitcastMultiUse) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
  HloInstruction* p0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, param_shape, "p0"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, bitcast));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, bitcast, negate0, negate1, negate2,
negate3, negate4, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0->operand(0), op::ShapeWithLayout(shape));
EXPECT_THAT(add->operand(0), op::ShapeWithLayout(shape_in_alternate_mem));
}
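// Same as above, but the late use of the bitcast goes through a tuple-shaped
// fusion operand.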
TEST_F(MemorySpaceAssignmentTest, BitcastMultiUseTuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
  HloInstruction* p0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, param_shape, "p0"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, bitcast));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bitcast, negate4}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, bitcast, negate0, negate1, negate2,
negate3, negate4, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0},
      /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0->operand(0), op::ShapeWithLayout(shape));
EXPECT_THAT(fusion->operand(0)->operand(0),
op::ShapeWithLayout(shape_in_alternate_mem));
}
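// The bitcast of p1 is scheduled long before its only use; the async copy
// inserted for it must be scheduled among the negates near the use, not next
// to the bitcast itself.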
TEST_F(MemorySpaceAssignmentTest, BitcastScheduleBug) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate7));
HloInstruction* negate9 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate8));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate9));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p0, p1, bitcast, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, negate8, negate9, add});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/4);
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
const auto& instructions =
module->schedule().sequence(module->entry_computation()).instructions();
for (int i = 0; i < instructions.size(); ++i) {
if (instructions.at(i)->opcode() == HloOpcode::kCopyStart) {
EXPECT_EQ(instructions.at(i - 1)->opcode(), HloOpcode::kNegate);
EXPECT_EQ(instructions.at(i + 1)->opcode(), HloOpcode::kNegate);
} else if (instructions.at(i)->opcode() == HloOpcode::kCopyDone) {
EXPECT_EQ(instructions.at(i - 1)->opcode(), HloOpcode::kNegate);
}
}
}
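// An add-dependency operand should be left in place rather than replaced by
// an async copy.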
TEST_F(MemorySpaceAssignmentTest, AddDependency) {
absl::string_view hlo_string = R"(
HloModule AddDependency, is_scheduled=true
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
%p = f32[3]{0} parameter(0)
%neg0 = f32[3]{0} negate(f32[3]{0} %p)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%token0 = token[] after-all()
%add_dep = f32[3]{0} add-dependency(f32[3]{0} %p, token[] %token0)
ROOT %add = f32[3]{0} add(f32[3]{0} %add_dep, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::AddDependency(), op::Negate()));
}
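// Uses a buffer-interval ordering that prioritizes multiply and tanh values.
// If the while loop's output element {0} is placed in alternate memory, a
// prefetch of the corresponding body parameter element must not also target
// alternate memory.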
TEST_F(MemorySpaceAssignmentTest, WhileAllocationBug) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[])) -> (f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[]) %body_param), index=1
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[]) %body_param), index=0
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.2)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[]) %cond_param), index=1
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_iter: f32[4,3], param_data: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_data = f32[] parameter(1)
%param_iter = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%tanh = f32[4,3]{1,0} tanh(f32[4,3]{1,0} %param_iter)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %tanh)
%tuple.1 = (f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %tanh, f32[] %param_data)
%while = (f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[]) %while), index=0
ROOT %add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.3, f32[4,3]{1,0} %add.4)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
bool a_is_mul =
a.buffer->defining_instruction()->opcode() == HloOpcode::kMultiply;
bool b_is_mul =
b.buffer->defining_instruction()->opcode() == HloOpcode::kMultiply;
if (a_is_mul && !b_is_mul) {
return true;
}
if (!a_is_mul && b_is_mul) {
return false;
}
bool a_is_tanh =
a.buffer->defining_instruction()->opcode() == HloOpcode::kTanh;
bool b_is_tanh =
b.buffer->defining_instruction()->opcode() == HloOpcode::kTanh;
if (a_is_tanh && !b_is_tanh) {
return true;
}
if (!a_is_tanh && b_is_tanh) {
return false;
}
return a.buffer->id() < b.buffer->id();
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  InstructionCountPrefetchIntervalPicker prefetch_interval_picker(
      /*min_overlap_count=*/2, /*max_overlap_count=*/10);
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
buffer_interval_compare, &prefetch_interval_picker);
for (const HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
const Shape& while_subshape =
ShapeUtil::GetSubshape(instruction->shape(), {0});
if (while_subshape.layout().memory_space() == kAlternateMemorySpace) {
const HloInstruction* body_param =
instruction->while_body()->parameter_instruction(0);
const HloInstruction* gte = nullptr;
for (const HloInstruction* user : body_param->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() == 0) {
gte = user;
break;
}
}
EXPECT_NE(gte, nullptr);
const HloInstruction* copy_start = nullptr;
for (const HloInstruction* user : gte->users()) {
if (user->opcode() == HloOpcode::kCopyStart) {
copy_start = user;
break;
}
}
EXPECT_NE(copy_start, nullptr);
const Shape& copy_start_subshape =
ShapeUtil::GetSubshape(copy_start->shape(), {0});
EXPECT_NE(copy_start_subshape.layout().memory_space(),
kAlternateMemorySpace);
}
}
}
}
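// Two consecutive while loops that pass values between them; only checks
// that memory space assignment completes without errors.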
TEST_F(MemorySpaceAssignmentTest, ConsecutiveWhileLoops) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.3)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
%WhileBody2 (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.3)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond2 (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=1
%tuple.2 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.3, f32[4,3]{1,0} get-tuple-element.5, f32[] %param_iter)
%while.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.2), condition=%WhileCond2, body=%WhileBody2
%get-tuple-element.6 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=0
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.6, f32[4,3]{1,0} %add.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
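// Per its name, a regression test involving long live ranges inside a while
// body; only checks that memory space assignment completes without errors.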
TEST_F(MemorySpaceAssignmentTest, WhileLiveRangeBug) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=1
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.5, f32[4,3]{1,0} %add.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
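// Two consecutive while loops with identical bodies that carry the same data
// buffer; exercises allocation across back-to-back loops.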
TEST_F(MemorySpaceAssignmentTest, ConsecutiveWhileLoopsOneBuffer) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
%WhileBody2 (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond2 (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
%tuple.2 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.3, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.2), condition=%WhileCond2, body=%WhileBody2
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=0
%get-tuple-element.6 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=1
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.5, f32[4,3]{1,0} %get-tuple-element.6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
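// Regression test: the while condition reads the loop-carried s32[6] buffer through
// bitcasts; assignment should handle the aliasing without failing.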
TEST_F(MemorySpaceAssignmentTest, WhileCondAliasBug) {
absl::string_view hlo_string = R"(
HloModule WhileWithPrngScalarResult.18, is_scheduled=true
%fused_computation (param_0.1: s32[6], param_1.3: s32[1], param_2.3: s32[5]) -> s32[6] {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
%body.3 (prev.4: s32[6]) -> s32[6] {
%constant.7 = s32[]{:T(128)} constant(100)
%constant.6 = s32[]{:T(128)} constant(0)
%constant.5 = s32[1]{0:T(128)} constant({1})
%prev.4 = s32[6]{0:T(128)} parameter(0)
%rng.8 = s32[5]{0:T(128)} rng(s32[]{:T(128)} %constant.6, s32[]{:T(128)} %constant.7), distribution=rng_uniform
%neg = s32[1]{0:T(128)} negate(s32[1]{0:T(128)} %constant.5)
ROOT %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %prev.4, s32[1]{0:T(128)} %neg, s32[5]{0:T(128)} %rng.8), kind=kLoop, calls=%fused_computation
}
%WhileWithPrngScalarResult.11 (prev.12: s32[6]) -> pred[] {
%constant.15 = s32[]{:T(128)} constant(1)
%prev.12 = s32[6]{0:T(128)} parameter(0)
%bitcast.1 = s32[1]{0:T(128)} bitcast(s32[6]{0:T(128)} %prev.12)
%bitcast = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %bitcast.1)
ROOT %compare.16 = pred[]{:T(128)} compare(s32[]{:T(128)} %constant.15, s32[]{:T(128)} %bitcast), direction=GT
}
ENTRY %WhileWithPrngScalarResult.18 () -> s32[6] {
%constant.1 = s32[]{:T(128)} constant(0)
%broadcast.2 = s32[6]{0:T(128)} broadcast(s32[]{:T(128)} %constant.1), dimensions={}
ROOT %while.17 = s32[6]{0:T(128)} while(s32[6]{0:T(128)} %broadcast.2), condition=%WhileWithPrngScalarResult.11, body=%body.3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
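// The while body updates a buffer in place through a dynamic-update-slice fusion;
// tuple element 1 of the while is expected to live in the alternate memory space.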
TEST_F(MemorySpaceAssignmentTest, WhileInPlaceBuffer) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
%WhileBody (body_param: (f32[2,3], f32[2,3], f32[])) -> (f32[2,3], f32[2,3], f32[]) {
%body_param = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=1
%fusion = f32[2,3]{1,0} fusion(get-tuple-element.3), kind=kLoop, calls=fused_computation
%multiply = f32[2,3]{1,0} multiply(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %fusion)
ROOT %tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) tuple(f32[2,3]{1,0} %multiply, f32[2,3]{1,0} %fusion, f32[] %get-tuple-element.1)
}
%WhileCond (cond_param: (f32[2,3], f32[2,3], f32[])) -> pred[] {
%cond_param = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[2,3], param_iter: f32[], p2: f32[2,3]) -> f32[2,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[2,3]{1,0} parameter(0)
%p2 = f32[2,3]{1,0} parameter(2)
%copy1 = f32[2,3]{1,0} copy(param_data)
%copy2 = f32[2,3]{1,0} copy(p2)
%tuple.1 = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) tuple(f32[2,3]{1,0} copy1, f32[2,3]{1,0} copy2, f32[] %param_iter)
%while = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) while((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %while), index=0
ROOT %copy3 = f32[2,3]{1,0} copy(get-tuple-element.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_op =
module->entry_computation()->GetInstructionWithName("while");
EXPECT_EQ(
ShapeUtil::GetSubshape(while_op->shape(), {1}).layout().memory_space(),
kAlternateMemorySpace);
}
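// Regression test: tuple elements 0 and 1 of the while input come from the same copy
// (copy0), so the loop carries aliased buffers; assignment should still succeed.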
TEST_F(MemorySpaceAssignmentTest, WhileSharedBufferVerificationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte0, gte0)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(negate14)
negate16 = f32[3]{0} negate(negate15)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, gte0, negate16, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
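// Regression test for b/228599972: a fusion returns a tuple whose elements feed only
// negates that never reach the root; assignment must tolerate the unused outputs.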
TEST_F(MemorySpaceAssignmentTest, b228599972) {
absl::string_view hlo_string = R"(
HloModule entry, is_scheduled=true
fused_computation {
%p0 = f32[2,3]{1,0} parameter(0)
%result0 = f32[2,3]{1,0} copy(%p0)
%result1 = f32[2,3]{1,0} copy(%p0)
ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(%result0, %result1)
}
ENTRY entry {
%p0 = f32[2,3]{1,0} parameter(0)
%p1 = f32[2,3]{1,0} parameter(1)
%unused = (f32[2,3]{1,0}, f32[2,3]{1,0}) fusion(%p0), kind=kLoop, calls=%fused_computation
%unused.0 = f32[2,3]{1,0} get-tuple-element(%unused), index=0
%unused.1 = f32[2,3]{1,0} get-tuple-element(%unused), index=1
%negate.0 = f32[2,3]{1,0} negate(f32[2,3]{1,0} %unused.0)
%negate.1 = f32[2,3]{1,0} negate(f32[2,3]{1,0} %unused.1)
ROOT %result = f32[2,3]{1,0} negate(%p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
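// Regression test for b/172243149: copy0 is both a while-loop input and used again
// after the loop, so its live range overlaps the loop; assignment should succeed.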
TEST_F(MemorySpaceAssignmentTest, b172243149) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte1, gte2)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(negate14)
negate16 = f32[3]{0} negate(negate15)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, negate16, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
copy2 = f32[3]{0} copy(p0)
negate = f32[3]{0} negate(copy0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy1, copy2, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=2
add0 = f32[3]{0} add(negate, copy0)
ROOT add1 = f32[3]{0} add(add0, gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
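// Regression test: the tuple instruction carries a control-predecessor edge;
// assignment should succeed on a module containing control dependencies.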
TEST_F(MemorySpaceAssignmentTest, ControlPredecessorsBug) {
absl::string_view hlo_string = R"(
HloModule sort.16, is_scheduled=true
ENTRY %sort.16 (param.0.1: s32[1], param.1.2: f32[1], param.2.3: u32[1], param.3.4: s32[1]) -> (s32[1], f32[1], u32[1], s32[1]) {
%param.3.4 = s32[1]{0:T(128)} parameter(3)
%param.2.3 = u32[1]{0:T(128)} parameter(2)
%param.1.2 = f32[1]{0:T(128)} parameter(1)
%param.0.1 = s32[1]{0:T(128)} parameter(0)
%tuple.1 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %param.0.1, f32[1]{0:T(128)} %param.1.2, u32[1]{0:T(128)} %param.2.3, s32[1]{0:T(128)} %param.3.4), control-predecessors={%param.0.1}
%get-tuple-element.4 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = f32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=1
%get-tuple-element.6 = u32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=2
%get-tuple-element.7 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=3
%copy.4 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.4)
%copy.5 = f32[1]{0:T(128)} copy(f32[1]{0:T(128)} %get-tuple-element.5)
%copy.6 = u32[1]{0:T(128)} copy(u32[1]{0:T(128)} %get-tuple-element.6)
%copy.7 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.7)
ROOT %tuple.2 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %copy.4, f32[1]{0:T(128)} %copy.5, u32[1]{0:T(128)} %copy.6, s32[1]{0:T(128)} %copy.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
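// The copied buffer feeding both conditional branches should be placed in alternate
// memory, and each branch computation should see its operand there as well.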
TEST_F(MemorySpaceAssignmentTest, ConditionalShouldBeAllocatedInAlternateMem) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy =
module->GetComputationWithName("entry")->GetInstructionWithName("copy");
EXPECT_EQ(copy->shape().layout().memory_space(), kAlternateMemorySpace);
auto neg1 = module->GetComputationWithName("true_computation")
->GetInstructionWithName("neg1");
auto neg1_operand = neg1->operand(0);
EXPECT_EQ(neg1_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
auto neg2 = module->GetComputationWithName("false_computation")
->GetInstructionWithName("neg2");
auto neg2_operand = neg2->operand(0);
EXPECT_EQ(neg2_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
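// copy1 is only needed late in the true branch, so it should stay in default memory
// at the conditional boundary and be prefetched inside the branch; copy0 goes to
// alternate memory.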
TEST_F(MemorySpaceAssignmentTest, ConditionalAvoidsUnnecessaryPrefetch) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}, f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
neg0 = f32[3]{0} negate(gte0)
neg1 = f32[3]{0} negate(neg0)
neg2 = f32[3]{0} negate(neg1)
neg3 = f32[3]{0} negate(neg2)
neg4 = f32[3]{0} negate(neg3)
neg5 = f32[3]{0} negate(neg4)
neg6 = f32[3]{0} negate(neg5)
neg7 = f32[3]{0} negate(neg6)
neg8 = f32[3]{0} negate(neg7)
neg9 = f32[3]{0} negate(neg8)
gte1 = f32[3]{0} get-tuple-element(p0), index=1
ROOT add = f32[3]{0} add(neg9, gte1)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple0 = (f32[3]{0}, f32[3]{0}) tuple(copy0, copy1)
tuple1 = (f32[3]{0}) tuple(copy0)
ROOT conditional = f32[3]{0} conditional(p1, tuple0, tuple1), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy0 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy0");
EXPECT_EQ(copy0->shape().layout().memory_space(), kAlternateMemorySpace);
auto copy1 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy1");
EXPECT_EQ(copy1->shape().layout().memory_space(), kDefaultMemorySpace);
auto add = module->GetComputationWithName("true_computation")
->GetInstructionWithName("add");
auto add_operand = add->operand(1);
EXPECT_EQ(add_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
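// copy1 is used inside the conditional and again by add1 afterwards; the later use is
// expected to read the value back in default memory through an async copy (copy-done).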
TEST_F(MemorySpaceAssignmentTest, ConditionalMultiUse) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}, f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
add0 = f32[3]{0} add(gte0, gte1)
neg0 = f32[3]{0} negate(add0)
neg1 = f32[3]{0} negate(neg0)
neg2 = f32[3]{0} negate(neg1)
neg3 = f32[3]{0} negate(neg2)
neg4 = f32[3]{0} negate(neg3)
neg5 = f32[3]{0} negate(neg4)
neg6 = f32[3]{0} negate(neg5)
neg7 = f32[3]{0} negate(neg6)
neg8 = f32[3]{0} negate(neg7)
ROOT neg9 = f32[3]{0} negate(neg8)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple0 = (f32[3]{0}, f32[3]{0}) tuple(copy0, copy1)
tuple1 = (f32[3]{0}) tuple(copy0)
conditional = f32[3]{0} conditional(p1, tuple0, tuple1), true_computation=true_computation, false_computation=false_computation
ROOT add1 = f32[3]{0} add(copy1, conditional)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy1 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy1");
EXPECT_EQ(copy1->shape().layout().memory_space(), kAlternateMemorySpace);
auto add0 = module->GetComputationWithName("true_computation")
->GetInstructionWithName("add0");
auto add0_operand = add0->operand(1);
EXPECT_EQ(add0_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
auto add1 =
module->GetComputationWithName("entry")->GetInstructionWithName("add1");
auto add1_operand = add1->operand(0);
EXPECT_EQ(add1_operand->shape().layout().memory_space(), kDefaultMemorySpace);
EXPECT_EQ(add1_operand->opcode(), HloOpcode::kCopyDone);
}
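// A conditional inside a while body shares a buffer with the while root; expects the
// conditional operand in alternate memory and the root operand to go through an
// evict-then-prefetch async copy pair.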
TEST_F(MemorySpaceAssignmentTest, ConditionalMultiUseInWhile) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
cond_tuple = (f32[3]{0}) tuple(gte0)
conditional = f32[3]{0} conditional(gte2, cond_tuple, cond_tuple), true_computation=true_computation, false_computation=false_computation
add = f32[3]{0} add(conditional, gte1)
neg0 = f32[3]{0} negate(add)
neg1 = f32[3]{0} negate(neg0)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, neg1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy0 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy0");
EXPECT_EQ(copy0->shape().layout().memory_space(), kAlternateMemorySpace);
auto conditional = module->GetComputationWithName("while_body")
->GetInstructionWithName("conditional");
auto conditional_operand = conditional->operand(1);
EXPECT_EQ(ShapeUtil::GetSubshape(conditional_operand->shape(), {0})
.layout()
.memory_space(),
kAlternateMemorySpace);
auto while_root =
module->GetComputationWithName("while_body")->root_instruction();
auto while_root_operand = while_root->operand(0);
EXPECT_THAT(
while_root_operand,
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::Parameter(0)))));
}
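// Conditionals nested two levels deep; the copied buffer should be visible in
// alternate memory from the innermost branches (neg1, neg2) as well as the outer
// false branch (neg3).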
TEST_F(MemorySpaceAssignmentTest, NestedConditional) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
slice = f32[1]{0} slice(gte), slice={[0:1]}
bitcast = f32[] bitcast(slice)
constant = f32[] constant(0.0)
compare = pred[] compare(bitcast, constant), direction=GT
ROOT conditional = f32[3]{0} conditional(compare, p0, p0), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy =
module->GetComputationWithName("entry")->GetInstructionWithName("copy");
EXPECT_EQ(copy->shape().layout().memory_space(), kAlternateMemorySpace);
auto neg1_operand = module->GetComputationWithName("true_computation2")
->GetInstructionWithName("neg1")
->operand(0);
auto neg2_operand = module->GetComputationWithName("false_computation2")
->GetInstructionWithName("neg2")
->operand(0);
auto neg3_operand = module->GetComputationWithName("false_computation1")
->GetInstructionWithName("neg3")
->operand(0);
EXPECT_EQ(neg1_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(neg2_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(neg3_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
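// Regression test: a buffer is re-tupled and reused across nested conditional
// branches; the test only checks that assignment (and its verification) succeeds.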
TEST_F(MemorySpaceAssignmentTest, NestedConditionalBufferReuseVerificationBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
neg1 = f32[3]{0} negate(gte)
neg2 = f32[3]{0} negate(neg1)
ROOT neg3 = f32[3]{0} negate(neg2)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg4 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
slice = f32[1]{0} slice(gte), slice={[0:1]}
bitcast = f32[] bitcast(slice)
constant = f32[] constant(0.0)
compare = pred[] compare(bitcast, constant), direction=GT
tuple = (f32[3]{0}) tuple(gte)
ROOT conditional = f32[3]{0} conditional(compare, tuple, tuple), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg5 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
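// Regression test: a while loop nested inside a two-level conditional; the test only
// checks that assignment succeeds.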
TEST_F(MemorySpaceAssignmentTest, WhileInsideNestedConditionalVerificationBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
while_cond {
p0 = (f32[3]{0}) parameter(0)
ROOT constant = pred[] constant(true)
}
while_body {
p0 = (f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
negate0 = f32[3]{0} negate(gte0)
ROOT tuple = (f32[3]{0}) tuple(negate0)
}
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
tuple = (f32[3]{0}) tuple(gte)
while = (f32[3]{0}) while(tuple), condition=while_cond, body=while_body
while_gte0 = f32[3]{0} get-tuple-element(while), index=0
ROOT root = f32[3]{0} negate(while_gte0)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
constant = pred[] constant(true)
tuple = (f32[3]{0}) tuple(gte)
ROOT conditional = f32[3]{0} conditional(constant, tuple, tuple), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
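// The false branch computes neg0/neg1 before reading its parameter, so the
// branch-local buffer must not share an offset with the conditional operand (copy).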
TEST_F(MemorySpaceAssignmentTest,
ConditionalComputationBufferOverlapBeforeParam) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
false_computation {
c = f32[3]{0} constant({0.0, 1.0, 2.0})
neg0 = f32[3]{0} negate(c)
neg1 = f32[3]{0} negate(neg0)
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT add = f32[3]{0} add(gte, neg1)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(module.get());
auto get_offset = [&](absl::string_view hlo_name) {
for (const auto& chunk : preset_assignments->chunks()) {
if (chunk.first.instruction->name() == hlo_name) {
return chunk.second.offset;
}
}
return static_cast<int64_t>(-1);
};
int64_t copy_offset = get_offset("copy");
int64_t neg0_offset = get_offset("neg0");
EXPECT_NE(copy_offset, -1);
EXPECT_NE(neg0_offset, -1);
EXPECT_NE(copy_offset, neg0_offset);
}
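// The u32[] request identifier at tuple index 1 of send/recv shapes must never be
// placed in the alternate memory space.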
TEST_F(MemorySpaceAssignmentTest,
RequestIdentifierShouldNotBeAllocatedInAlternateMem) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
%p = f32[3]{0} parameter(0)
%after-all = token[] after-all()
%recv.4 = (f32[3]{0}, u32[], token[]) recv(token[] %after-all), channel_id=7
%recv-done.4 = (f32[3]{0}, token[]) recv-done((f32[3]{0}, u32[], token[]) %recv.4), channel_id=7
%token.1 = token[] get-tuple-element((f32[3]{0}, token[]) %recv-done.4), index=1
%data = f32[3]{0} get-tuple-element((f32[3]{0}, token[]) %recv-done.4), index=0
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %data, token[] %token.1), channel_id=2
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p, f32[3]{0} %data)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
for (const HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kSend ||
instruction->opcode() == HloOpcode::kRecv) {
const Shape& request_identifier_shape =
ShapeUtil::GetSubshape(instruction->shape(), {1});
EXPECT_NE(request_identifier_shape.layout().memory_space(),
kAlternateMemorySpace);
}
}
}
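// Named after the invariant that send-done keeps its send operand; the test itself
// only checks that assignment succeeds on a module containing send/send-done.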
TEST_F(MemorySpaceAssignmentTest, SendDoneShouldHaveSendOperand) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p0: f32[3], p1: f32[3]) -> f32[3] {
%p0 = f32[3]{0} parameter(0)
%p1 = f32[3]{0} parameter(1)
%neg0 = f32[3]{0} negate(f32[3]{0} %p1)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%after-all = token[] after-all()
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %p0, token[] %after-all), channel_id=2
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p0, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
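// Send and the matching send-done are separated by a long negate chain; per the test
// name, the send buffer should keep one allocation across that span.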
TEST_F(MemorySpaceAssignmentTest, SendAndSendDoneShouldGetSameAllocation) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p0: f32[3], p1: f32[3]) -> f32[3] {
%p0 = f32[3]{0} parameter(0)
%p1 = f32[3]{0} parameter(1)
%after-all = token[] after-all()
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %p0, token[] %after-all), channel_id=2
%neg0 = f32[3]{0} negate(f32[3]{0} %p1)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p0, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 10, 4);
}
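// Built with the HLO builder API: sub1, the later use of p0, is expected to read it
// through an async copy into alternate memory, while add1 reads the parameter directly.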
TEST_F(MemorySpaceAssignmentTest, LastUseOpt) {
HloComputation::Builder builder(TestName());
Shape shape1 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(F32, {2, 4});
PaddingConfig padding_config = MakeEdgePaddingConfig({{0, 0}, {0, 1}});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape1, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape2, "p1"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kAdd, p0, p0));
HloInstruction* sub1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kSubtract, p0, add1));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMultiply, p1, p1));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kAdd, mul1, p1));
HloInstruction* mul2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMultiply, add1, sub1));
HloInstruction* padding_value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32)));
HloInstruction* padded_mul2 = builder.AddInstruction(
HloInstruction::CreatePad(shape2, mul2, padding_value, padding_config));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kAdd, add2, padded_mul2));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, add1, sub1, mul1, add2, mul2,
padding_value, padded_mul2, add3});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul2,
op::Multiply(
op::Add(op::Parameter(0), op::Parameter(0)),
op::Subtract(op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(0)),
op::Add(op::Parameter(0), op::Parameter(0)))));
}
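// While loop built with the HLO builder API and an explicit schedule for the
// condition, body, and entry computations.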
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule1) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_data"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* p2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "p2"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 0));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, while_data, p2));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation,
{iter, data, p2, tuple, while_op, while_data, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 50);
}
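// A called computation (kCall) with its own schedule; values crossing the call
// boundary (add1, negate8) must be handled by assignment.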
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule2) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto call_builder = HloComputation::Builder("Call");
HloInstruction* call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "call_param"));
HloInstruction* call_param2 = call_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape2, "call_param2"));
HloInstruction* slice = call_builder.AddInstruction(
HloInstruction::CreateSlice(shape, call_param2, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, call_param, slice));
HloInstruction* negate0 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, call_param, negate7));
HloComputation* call_computation =
module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape2, "p1"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape2, HloOpcode::kNegate, p1));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {add1, negate8}, call_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, add1));
HloInstruction* add4 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, call, add3));
HloInstruction* add5 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add2, add4));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
call_computation,
{call_param, call_param2, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(entry_computation,
{p0, p1, add1, add2, negate8, call, add3, add4, add5});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 5);
}
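// Like the previous test, but the called computation produces its second input with
// an iota instead of taking it as a parameter.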
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule3) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto call_builder = HloComputation::Builder("Call");
HloInstruction* call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "call_param"));
HloInstruction* iota =
call_builder.AddInstruction(HloInstruction::CreateIota(shape2, 0));
HloInstruction* slice = call_builder.AddInstruction(
HloInstruction::CreateSlice(shape, iota, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, call_param, slice));
HloInstruction* negate0 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, call_param, negate7));
HloComputation* call_computation =
module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {add1}, call_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, call, add1));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
call_computation,
{call_param, iota, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(entry_computation, {p0, add1, add2, call, add3});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 5);
}
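// Disabled: a conditional whose branch computations are built with the builder API
// and explicitly scheduled.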
TEST_F(MemorySpaceAssignmentTest, DISABLED_NonEntryComputationSchedule4) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto true_builder = HloComputation::Builder("True");
HloInstruction* true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "true_param"));
HloInstruction* iota =
true_builder.AddInstruction(HloInstruction::CreateIota(shape2, 0));
HloInstruction* slice = true_builder.AddInstruction(
HloInstruction::CreateSlice(shape, iota, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
true_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, true_param, slice));
HloInstruction* negate0 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
true_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, true_param, negate7));
HloComputation* true_computation =
module->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder("False");
HloInstruction* false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "false_param"));
HloComputation* false_computation =
module->AddEmbeddedComputation(false_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
shape, pred, add1, true_computation, add2, false_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, conditional, add1));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
true_computation,
{true_param, iota, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(false_computation, {false_param});
schedule.set_sequence(entry_computation,
{p0, add1, add2, pred, conditional, add3});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 5);
}
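// The while body forwards its data elements unchanged and only increments the
// iteration counter; a long negate chain before the loop adds memory pressure.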
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule5) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape =
ShapeUtil::MakeTupleShape({shape, scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data2 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 2));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data, body_iter_next, body_data2}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_data"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* data2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape, "param_data2"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, data));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kSubtract, iter, data2));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({negate7, iter, data2}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, while_op, 1));
HloInstruction* root =
builder.AddInstruction(HloInstruction::CreateTuple({while_data, sub}));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data2, body_out});
schedule.set_sequence(
entry_computation,
{iter, data, data2, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, negate7, sub, tuple, while_op, while_data, root});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 20);
}
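// The while body threads a negate chain into tuple element 2; the test then checks
// the expected memory space of each tuple element via layouts on the while shape.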
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule6) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape, shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_negate0 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_data));
HloInstruction* body_negate1 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate0));
HloInstruction* body_negate2 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate1));
HloInstruction* body_negate3 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate2));
HloInstruction* body_negate4 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate3));
HloInstruction* body_negate5 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate4));
HloInstruction* body_negate6 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate5));
HloInstruction* body_negate7 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate6));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data, body_iter_next, body_negate7}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_data"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, data));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({data, iter, negate7}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 0));
HloInstruction* while_data2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 2));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, while_data, while_data2));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(
body_computation,
{body_param, body_iter, body_data, body_negate0, body_negate1,
body_negate2, body_negate3, body_negate4, body_negate5, body_negate6,
body_negate7, body_iter_increment, body_iter_next, body_out});
schedule.set_sequence(
entry_computation,
{iter, data, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, negate7, tuple, while_op, while_data, while_data2, root});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 25);
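  // Build the expected tuple layout: element 0 in alternate memory, elements 1
  // and 2 in default memory.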
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {0})->mutable_layout() =
      LayoutUtil::MakeLayout({1, 0}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
                             PRIMITIVE_TYPE_INVALID, 0, kAlternateMemorySpace);
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {1})->mutable_layout() =
      LayoutUtil::MakeLayout({}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
                             PRIMITIVE_TYPE_INVALID, 0, kDefaultMemorySpace);
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {2})->mutable_layout() =
      LayoutUtil::MakeLayout({1, 0}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
                             PRIMITIVE_TYPE_INVALID, 0, kDefaultMemorySpace);
EXPECT_THAT(while_op, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(while_op->operand(0), op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(cond_param, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(body_param, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(body_out, op::ShapeWithLayout(tuple_shape));
}
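// The copy of tuple element p1a is never consumed by the root (the add uses
// p1b instead); MSA should handle this dangling copy without issues.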
TEST_F(MemorySpaceAssignmentTest, DanglingCopy) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* p1a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* copy = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopy, p1a));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1b = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1b));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1a, copy, p1b, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
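// A custom fusion returns a tuple of its two parameters and both elements are
// consumed, exercising multi-output fusion handling.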
TEST_F(MemorySpaceAssignmentTest, MultiOutputFusion) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param0 = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion_param1 = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion_builder.AddInstruction(
HloInstruction::CreateTuple({fusion_param0, fusion_param1}));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion_computation));
HloInstruction* element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 0));
HloInstruction* element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, element0, element1));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, fusion, element0, element1, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
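// A custom fusion consumes a tuple-shaped operand, exercising handling of
// tuple-shaped fusion inputs.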
TEST_F(MemorySpaceAssignmentTest, TupleInput) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
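// fusion0 produces a tuple that fusion1 only consumes much later; both tuple
// elements are expected to be prefetched into alternate memory before fusion1.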
TEST_F(MemorySpaceAssignmentTest, TupleToTuple1) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element1));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion0, 0));
HloInstruction* element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion0, 1));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, element0, element1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, negate6));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add1, fusion1));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, fusion0, element0, element1, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, add0, add1, fusion1, mul});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 5);
EXPECT_THAT(fusion1,
op::Fusion(op::Tuple(
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 0)),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 1)))));
}
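// Like TupleToTuple1, but the producing fusion returns a nested tuple; every
// leaf element is expected to be prefetched into alternate memory.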
TEST_F(MemorySpaceAssignmentTest, TupleToTuple2) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape({shape, tuple_shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* fusion0_tuple = fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_tuple}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, nested_tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple_shape, fusion1_param, 1));
HloInstruction* fusion1_element2 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_element1, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element2));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
nested_tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p0, fusion0, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, fusion1});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 5);
EXPECT_THAT(
fusion1,
op::Fusion(op::Tuple(
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 0)),
op::Tuple(
op::AsyncCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::GetTupleElement(op::Fusion(), 1), 0)),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(
op::GetTupleElement(op::Fusion(), 1), 1))))));
}
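// fusion1 consumes fusion0 immediately in the schedule, so no prefetch is
// expected between them.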
TEST_F(MemorySpaceAssignmentTest, TupleToTuple3) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element1));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, fusion0, fusion1});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(fusion1, op::Fusion(op::Fusion()));
}
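// Buffers aliased between the entry parameter and the output must stay in
// default memory, so the aliased parameter keeps default-memory layouts.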
TEST_F(MemorySpaceAssignmentTest, InputOutputAlias) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, add));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({p0, add}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1, add, negate7, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
TF_CHECK_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
TF_CHECK_OK(module->input_output_alias_config().SetUpAlias({1}, 0, {1}));
AssignMemorySpace(module.get());
EXPECT_EQ(p->shape().tuple_shapes(0).layout().memory_space(),
kDefaultMemorySpace);
EXPECT_EQ(p->shape().tuple_shapes(1).layout().memory_space(),
kDefaultMemorySpace);
}
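// With cost analysis enabled, the negate chain is expected to live in
// alternate memory while the parameters keep their default layouts.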
TEST_F(MemorySpaceAssignmentTest, CostAnalysis) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3}, {1, 0}, {}, 1, 0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
}
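// Under the memory-boundedness sort order, at least one negate should land in
// alternate memory while all tanh values stay in default memory.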
TEST_F(MemorySpaceAssignmentTest, MemoryBoundednessBufferIntervalCompare) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p1));
HloInstruction* tanh1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* tanh2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh1));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* tanh3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh2));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* tanh4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh3));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({tanh4, negate4}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, tanh0, negate0, tanh1, negate1, tanh2, negate2,
tanh3, negate3, tanh4, negate4, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {4, 3}, {1, 0}, {}, 1, 0, kDefaultMemorySpace);
std::vector<HloInstruction*> negate_instructions = {negate0, negate1, negate2,
negate3, negate4};
int64_t num_negates_in_alternate_mem = absl::c_count_if(
negate_instructions, [&](const HloInstruction* instruction) {
return instruction->shape().layout().memory_space() ==
kAlternateMemorySpace;
});
EXPECT_GE(num_negates_in_alternate_mem, 1);
EXPECT_THAT(tanh0, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh1, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh2, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh3, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh4, op::ShapeWithLayout(shape_in_default_mem));
}
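// An MSA sort-order override matches the negates and assigns them first, so
// negate0..negate3 end up in alternate memory (negate4 is the output) and all
// tanhs stay in default memory.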
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[3,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[3,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[3,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[3,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[3,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[3,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[3,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { instruction_name_regex: "negate(.*)" }
override_options { assign_first: true }
})pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
  AssignMemorySpaceUsingCostAnalysis(module.get(), std::nullopt, std::nullopt,
                                     std::nullopt, msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
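// The override pushes the tanhs to the back of the sort order; the negates win
// alternate memory and all tanhs stay in default memory.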
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderAssignLast) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[3,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[3,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[3,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[3,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[3,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[3,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[3,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { instruction_name_regex: "tanh(.*)" }
override_options { assign_last: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
  AssignMemorySpaceUsingCostAnalysis(module.get(), std::nullopt, std::nullopt,
                                     std::nullopt, msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
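// With size_lte: 48, the 48-byte f32[3,4] tanh buffers are assigned first and
// fit in the 120-byte alternate memory (tanh4 is the output and stays in
// default memory); the 80-byte f32[5,4] negates remain in default memory.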
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderBySizeLteAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[5,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[5,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[5,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[5,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[5,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[5,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[5,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { size_lte: 48 }
override_options { assign_first: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
Options memory_space_options = DefaultMemorySpaceOptions();
memory_space_options.max_size_in_bytes = 120;
  AssignMemorySpaceUsingCostAnalysis(module.get(), memory_space_options,
                                     std::nullopt, std::nullopt,
                                     msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
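// With size_gte: 80, the 80-byte f32[5,4] negate buffers are assigned first
// into the 160-byte alternate memory (negate4 is the output and stays in
// default memory); the smaller tanhs remain in default memory.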
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderBySizeGteAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[5,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[5,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[5,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[5,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[5,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[5,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[5,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { size_gte: 80 }
override_options { assign_first: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
Options memory_space_options = DefaultMemorySpaceOptions();
memory_space_options.max_size_in_bytes = 160;
  AssignMemorySpaceUsingCostAnalysis(module.get(), memory_space_options,
                                     std::nullopt, std::nullopt,
                                     msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
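// A simple counted while loop over an (s32, f32[1]) tuple; the while parameter
// and result are expected to keep default-memory layouts.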
TEST_F(MemorySpaceAssignmentTest, SimpleWhileTupleTest) {
Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
Shape f32v1 = ShapeUtil::MakeShape(F32, {1});
Shape t_s32_f32v1 = ShapeUtil::MakeTupleShape({s32, f32v1});
auto module = CreateNewVerifiedModule("SimpleWhile");
HloSchedule schedule(module.get());
HloComputation* cond_computation;
{
auto builder = HloComputation::Builder("WhileCond");
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "x"));
auto index = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));
auto compare = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,
const4, ComparisonDirection::kLt));
cond_computation = module->AddEmbeddedComputation(builder.Build());
schedule.set_sequence(cond_computation, {const4, param, index, compare});
}
HloComputation* body_computation;
{
auto builder = HloComputation::Builder("WhileBody");
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.1f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "x"));
auto indexc = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));
auto addc = builder.AddInstruction(HloInstruction::CreateBinary(
indexc->shape(), HloOpcode::kAdd, indexc, const1));
auto indexv = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));
auto addv = builder.AddInstruction(HloInstruction::CreateBinary(
constv->shape(), HloOpcode::kAdd, indexv, constv));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));
body_computation = module->AddEmbeddedComputation(builder.Build());
schedule.set_sequence(body_computation, {const1, constv, param, indexc,
addc, indexv, addv, tuple});
}
auto builder = HloComputation::Builder("SimpleWhile");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "param"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32v1, param, 1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
auto while0 = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v1, cond_computation, body_computation, tuple));
HloComputation* computation = module->AddEntryComputation(builder.Build());
schedule.set_sequence(computation, {param, gte0, gte1, tuple, while0});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(), 50);
  Shape shape_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {4, 6}, {1, 0}, {}, 1, 0, kDefaultMemorySpace);
  Shape s32_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      xla::S32, {}, {}, {}, 1, 0, kDefaultMemorySpace);
  Shape f32v1_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {1}, {0}, {}, 1, 0, kDefaultMemorySpace);
Shape t_s32_f32v1_in_default_mem =
ShapeUtil::MakeTupleShape({s32_in_default_mem, f32v1_in_default_mem});
EXPECT_THAT(param, op::ShapeWithLayout(t_s32_f32v1_in_default_mem));
EXPECT_THAT(while0, op::ShapeWithLayout(t_s32_f32v1_in_default_mem));
}
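// Checks that evictions are not delayed: at any point in the schedule, at most
// two alternate-memory buffers may be live simultaneously.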
TEST_F(MemorySpaceAssignmentTest, EvictionsShouldntBeDelayed) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* tanh0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, tanh0));
HloInstruction* tanh1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, negate0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* tanh2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh1));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* tanh3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh2));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({tanh3, negate3, tanh0}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, tanh0, tanh_redundant0, tanh_redundant1, tanh_redundant2,
tanh_redundant3, tanh_redundant4, tanh_redundant5, tanh_redundant6,
negate0, tanh1, negate1, tanh2, negate2, tanh3, negate3, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
std::vector<int> num_live_buffers_in_alternate_mem(
hlo_live_range->flattened_instruction_sequence().size() + 1, 0);
for (const HloValue* value : alias_analysis->dataflow_analysis().values()) {
const Shape& shape = value->shape();
if (!shape.has_layout() ||
shape.layout().memory_space() == kDefaultMemorySpace) {
continue;
}
HloLiveRange::TimeBound time_bound =
hlo_live_range->buffer_live_ranges().at(value);
for (int i = time_bound.start; i <= time_bound.end; ++i) {
++num_live_buffers_in_alternate_mem[i];
}
}
for (int i = 0; i < num_live_buffers_in_alternate_mem.size(); ++i) {
EXPECT_LE(num_live_buffers_in_alternate_mem[i], 2);
}
}
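// Buffers whose entry layout already requests alternate memory (p1 and add
// here) must not receive preset chunk assignments from MSA.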
TEST_F(MemorySpaceAssignmentTest,
InputOutputsInAlternateMemShouldntBeAssigned) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3}, {1, 0}, {}, 1, 0, kAlternateMemorySpace);
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_in_alternate_mem, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
shape_in_alternate_mem, HloOpcode::kAdd, negate6, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add, negate5}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, add, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
EXPECT_THAT(p1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(add, op::Add(op::Negate(), op::Parameter(1)));
EXPECT_THAT(add, op::ShapeWithLayout(shape_in_alternate_mem));
for (const auto& position_and_chunk : preset_assignments->chunks()) {
const HloPosition& position = position_and_chunk.first;
EXPECT_NE(position.instruction, p1);
EXPECT_NE(position.instruction, add);
}
}
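// Regression test: reproduces a memory-corruption scenario involving pending
// chunks, using an opcode-based buffer priority and a narrow prefetch window.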
TEST_F(MemorySpaceAssignmentTest, PendingChunkMemoryCorruptionBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY %Entry {
%param0 = f32[8,3] parameter(0)
%param1 = f32[2,4] parameter(1)
%a = f32[8,3] sine(%param0)
%b = f32[2,4] cosine(%param1)
%d = f32[8,3] tanh(%a)
%c = f32[8,3] negate(%a)
%e = f32[2,4] negate(%b)
%f = f32[2,4] negate(%e)
%g = f32[2,4] negate(%f)
%h = f32[2,4] negate(%g)
%i = f32[2,4] negate(%h)
%j = f32[2,4] negate(%i)
%k = f32[2,4] negate(%j)
%l = f32[2,4] negate(%k)
%m = f32[8,3] negate(%d)
%n = f32[2,4] sine(%l)
%o = f32[8,3] negate(%d)
%p = f32[2,4] negate(%n)
%q = f32[8,3] negate(%m)
ROOT %tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(%p, %q, %o)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
buffer_interval_compare, &prefetch_interval_picker);
}
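// Regression test: the same value (b) feeds two elements of the while tuple,
// which previously confused required-assignment handling.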
TEST_F(MemorySpaceAssignmentTest, WhileAliasedArgumentRequiredAssignmentBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition {
param1 = (f32[2,4], f32[2,4], f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body {
param2 = (f32[2,4], f32[2,4], f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
gte3 = f32[2,4] get-tuple-element(param2), index=1
gte4 = f32[2,4] get-tuple-element(param2), index=2
add = f32[2,4] add(gte2, gte3)
ROOT tuple2 = (f32[2,4], f32[2,4], f32[2,4]) tuple(add, gte3, gte4)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(param0)
tuple = (f32[2,4], f32[2,4], f32[2,4]) tuple(a, b, b)
while = (f32[2,4], f32[2,4], f32[2,4]) while(tuple), condition=while_condition, body=while_body
gte1 = f32[2,4] get-tuple-element(while), index=0
gte2 = f32[2,4] get-tuple-element(while), index=1
ROOT root = f32[2,4] add(gte1, gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
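// Regression test with is_use_allowed_in_alternate_mem_fn disallowing tanh
// uses in alternate memory.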
TEST_F(MemorySpaceAssignmentTest, DisallowedUseBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3] cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] tanh(a)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] sine(m)
o = f32[8,3] negate(a)
p = f32[2,4] negate(n)
q = f32[8,3] add(o, f)
r = f32[8,3] add(q, d)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kTanh;
};
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
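// Same disallowed-use restriction (no tanh uses in alternate memory), but
// exercised inside a while body.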
TEST_F(MemorySpaceAssignmentTest, DisallowedUseBugInWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte0, gte0)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(gte2)
tanh = f32[3]{0} tanh(gte2)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(negate14, tanh, gte2, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kTanh;
};
AssignMemorySpace(module.get(), options);
}
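// Tuple element 1 is loop-invariant and only read by tanh inside the body; it
// should stay in alternate memory across the while without a redundant
// eviction.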
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionInWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 1);
EXPECT_EQ(gte1->users()[0]->opcode(), HloOpcode::kTanh);
const HloInstruction* while_root =
while_instr->while_body()->root_instruction();
EXPECT_THAT(while_root->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Parameter(0))));
}
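// Redundant-eviction elimination must not add an extra element to the while
// tuple; the tuple keeps exactly three elements.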
TEST_F(MemorySpaceAssignmentTest,
RedundantEvictionEliminationShouldntAddRedundantParam) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
add = f32[3]{0} add(negate1, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 3);
}
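// Same redundant-eviction avoidance as above, but with the loop-invariant
// element flowing through a nested while (while1 wrapping while2).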
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionInNestedWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(p0), condition=while_cond2, body=while_body2
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond1, body=while_body1
gte = f32[3]{0} get-tuple-element(while1), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while1_instr = FindInstruction(module.get(), "while1");
EXPECT_EQ(while1_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* while2_instr = FindInstruction(module.get(), "while2");
EXPECT_EQ(while2_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 1);
EXPECT_EQ(gte1->users()[0]->opcode(), HloOpcode::kTanh);
const HloInstruction* while_root =
while2_instr->while_body()->root_instruction();
EXPECT_THAT(while_root->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Parameter(0))));
}
TEST_F(MemorySpaceAssignmentTest, RedundantEvictionEliminationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add0 = f32[3]{0} add(negate14, tanh)
add1 = f32[3]{0} add(add0, gte1)
negate = f32[3]{0} negate(add1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add1, negate, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
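// Tuple element 1 is overwritten inside the loop body, so the eviction is
// not fully redundant: gte1 is expected to have two users, one of which is
// a copy-start (eviction).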
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 3);
EXPECT_EQ(while_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 2);
EXPECT_NE(
absl::c_find_if(gte1->users(), HloPredicateIsOp<HloOpcode::kCopyStart>),
gte1->users().end());
}
TEST_F(MemorySpaceAssignmentTest, RedundantEvictionEliminationInChainedWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
add = f32[3]{0} add(negate0, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond1, body=while_body1
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(while1), condition=while_cond2, body=while_body2
gte = f32[3]{0} get-tuple-element(while2), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
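// while1 is expected to end up with exactly one more tuple element than
// while2.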
EXPECT_EQ(
FindInstruction(module.get(), "while1")->shape().tuple_shapes_size(),
FindInstruction(module.get(), "while2")->shape().tuple_shapes_size() + 1);
}
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionAfterWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
negate0 = f32[3]{0} negate(p0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, negate14, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while), index=0
gte1 = f32[3]{0} get-tuple-element(while), index=1
negate20 = f32[3]{0} negate(gte1)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
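// The root's prefetched operand should come straight from the copy that
// already lives in default memory, not from a fresh eviction of the while
// output.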
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace, op::Copy()));
}
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionAfterWhile2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple1 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple1), condition=while_cond1, body=while_body1
gte0 = f32[3]{0} get-tuple-element(while1), index=0
gte1 = f32[3]{0} get-tuple-element(while1), index=1
negate0 = f32[3]{0} negate(gte1)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple2 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, negate14, p1)
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple2), condition=while_cond2, body=while_body2
gte2 = f32[3]{0} get-tuple-element(while2), index=0
gte3 = f32[3]{0} get-tuple-element(while2), index=1
negate20 = f32[3]{0} negate(gte3)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
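// Here an eviction is genuinely needed: the root operand is expected to be
// prefetched from an eviction of the while2 output.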
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::While()))));
}
TEST_F(MemorySpaceAssignmentTest,
AfterWhileRedundantEarlierEvictionModifiedBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
negate = f32[3]{0} negate(gte0)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(negate, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
negate0 = f32[3]{0} negate(p0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, negate14, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while), index=0
gte1 = f32[3]{0} get-tuple-element(while), index=1
negate20 = f32[3]{0} negate(gte1)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
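// The while body modifies tuple element 0, so the eviction before the loop
// cannot be reused; the root operand should be an eviction of the while
// output followed by a prefetch.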
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::While()))));
}
TEST_F(MemorySpaceAssignmentTest,
WhileRedundantEvictionWithInefficientAllocationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
add = f32[3]{0} add(negate1, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
p2 = f32[3]{0} parameter(2)
copy = f32[3]{0} copy(p0)
tuple1 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple1), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while1), index=0
gte1 = f32[3]{0} get-tuple-element(while1), index=1
negate0_entry = f32[3]{0} negate(gte1)
gte2 = pred[] get-tuple-element(while1), index=2
tuple2 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, gte1, gte2)
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple2), condition=while_cond1, body=while_body1
negate1 = f32[3]{0} negate(negate0_entry)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
gte = f32[3]{0} get-tuple-element(while2), index=1
ROOT add = f32[3]{0} add(gte, negate14)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
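// Mark the use of while1's tuple element 1 by negate0_entry as inefficient
// exactly once, to exercise the interaction between redundant-eviction
// elimination and inefficient-allocation handling; the test only checks
// that assignment completes successfully.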
Options options = DefaultMemorySpaceOptions();
bool marked_inefficient = false;
options.get_inefficient_allocation_sites_fn =
[&](absl::Span<HloPosition> defining_positions)
-> std::vector<std::variant<HloPosition, HloUse>> {
if (absl::c_find(defining_positions,
HloPosition{FindInstruction(module.get(), "while1"),
{1}}) != defining_positions.end() &&
!marked_inefficient) {
LOG(INFO) << "Marking the use inefficient.";
marked_inefficient = true;
return {HloUse{FindInstruction(module.get(), "negate0_entry"), 0}};
}
return {};
};
AssignMemorySpace(module.get(), options);
}
TEST_F(MemorySpaceAssignmentTest, DisablePrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = f32[3]{0} parameter(1)
negate1 = f32[3]{0} negate(p1)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
ROOT add = f32[3]{0} add(negate9, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_outstanding_prefetches = 0;
AssignMemorySpace(module.get(), options);
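// With no outstanding prefetches allowed, p0 should be consumed directly
// from default memory as a parameter.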
EXPECT_THAT(module->entry_computation()->root_instruction()->operand(1),
op::Parameter());
}
TEST_F(MemorySpaceAssignmentTest, BitcastRoot) {
absl::string_view hlo_string = R"(
HloModule primitive_computation_gather.4, is_scheduled=true
%while_body {
%param.1 = (s32[], f32[3,3,3]) parameter(0)
%get-tuple-element.32 = s32[] get-tuple-element(%param.1), index=0
%copy.6 = s32[] copy(s32[] %get-tuple-element.32)
%constant.8 = s32[] constant(1)
%add = s32[] add(s32[] %copy.6, s32[] %constant.8)
%get-tuple-element.35 = f32[3,3,3] get-tuple-element(%param.1), index=1
negate = f32[3,3,3] negate(get-tuple-element.35)
ROOT %tuple.10 = (s32[], f32[3,3,3]) tuple(s32[] %add, f32[3,3,3] negate)
}
%while_cond {
%param.0 = (s32[], f32[3,3,3]) parameter(0)
%get-tuple-element = s32[] get-tuple-element(%param.0), index=0
%constant.3 = s32[] constant(3)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant.3), direction=LT
}
ENTRY %primitive_computation_gather.4 (parameter.1: f32[3,10,5], parameter.2: s32[3,1]) -> f32[3,3,3] {
%constant.1 = s32[] constant(0)
%copy.11 = s32[] copy(s32[] %constant.1)
%constant = f32[] constant(0)
%broadcast = f32[3,3,3] broadcast(f32[] %constant), dimensions={}
%tuple.8 = (s32[], f32[3,3,3]) tuple(s32[] %copy.11, f32[3,3,3] %broadcast)
%while = (s32[], f32[3,3,3]) while(%tuple.8), condition=%while_cond, body=%while_body
%get-tuple-element.7 = f32[3,3,3] get-tuple-element(%while), index=1
ROOT %bitcast.1 = f32[3,3,3] bitcast(f32[3,3,3] %get-tuple-element.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
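// The bitcast at the module root must remain in default memory (or carry no
// layout at all).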
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(!root->shape().has_layout() ||
root->shape().layout().memory_space() == kDefaultMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, PrecoloredBuffer) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3]{1,0:S(1)} cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] negate(d)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[8,3] negate(f)
p = f32[2,4] negate(n)
q = f32[8,3] add(f, o)
r = f32[8,3] add(q, a)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kNegate:
return 0;
case HloOpcode::kAdd:
return 1;
case HloOpcode::kCos:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
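// "a" is precolored to alternate memory via its S(1) layout; it should stay
// there, be consumed directly by "d" and "r", and receive a chunk in the
// preset assignments.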
const HloInstruction* r = FindInstruction(module.get(), "r");
const HloInstruction* d = FindInstruction(module.get(), "d");
const HloInstruction* a = FindInstruction(module.get(), "a");
EXPECT_EQ(r->operand(1), a);
EXPECT_EQ(d->operand(0), a);
EXPECT_EQ(a->shape().layout().memory_space(), kAlternateMemorySpace);
auto a_entry = std::find_if(
preset_assignments->chunks().begin(), preset_assignments->chunks().end(),
[&](std::pair<HloPosition, HeapSimulator::Chunk> position_and_chunk) {
return position_and_chunk.first.instruction == a;
});
EXPECT_NE(a_entry, preset_assignments->chunks().end());
}
TEST_F(MemorySpaceAssignmentTest, PrecoloredBufferOOM) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3]{1,0:S(1)} cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] negate(d)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[8,3]{1,0:S(1)} negate(f)
p = f32[2,4] negate(n)
q = f32[8,3] add(f, o)
r = f32[8,3] add(q, a)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kNegate:
return 0;
case HloOpcode::kAdd:
return 1;
case HloOpcode::kCos:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
auto status_or = AssignMemorySpaceAndReturnStatus(module.get(), options,
buffer_interval_compare,
&prefetch_interval_picker);
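// With two precolored f32[8,3] buffers ("a" and "o") competing for the
// alternate memory, assignment is expected to fail with FAILED_PRECONDITION.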
EXPECT_THAT(
status_or.status(),
tsl::testing::StatusIs(
tsl::error::FAILED_PRECONDITION,
::testing::HasSubstr("requires allocation in the alternate memory, "
"which could not be satisfied")));
}
TEST_F(MemorySpaceAssignmentTest, AsyncOpShortLiveRange) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
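// The collective-permute has a short live range, so both tuple elements of
// the start op can be placed in alternate memory.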
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kAlternateMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, AsyncOpShortLiveRangeInputBufferConsumer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(negate0)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
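// negate0 is also consumed after the start op, so the input buffer (tuple
// element 0) is expected in default memory while the output (element 1) can
// still live in alternate memory.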
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kDefaultMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, AsyncOpLongLiveRange) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
negate8 = bf16[4]{0} negate(negate7)
negate9 = bf16[4]{0} negate(negate8)
negate10 = bf16[4]{0} negate(negate9)
negate11 = bf16[4]{0} negate(negate10)
negate12 = bf16[4]{0} negate(negate11)
negate13 = bf16[4]{0} negate(negate12)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
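// With a long live range across the collective-permute, both tuple elements
// of the start op are expected to stay in default memory.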
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kDefaultMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kDefaultMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, AsyncOpLongLiveRangeInputBufferConsumer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(negate0)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
negate8 = bf16[4]{0} negate(negate7)
negate9 = bf16[4]{0} negate(negate8)
negate10 = bf16[4]{0} negate(negate9)
negate11 = bf16[4]{0} negate(negate10)
negate12 = bf16[4]{0} negate(negate11)
negate13 = bf16[4]{0} negate(negate12)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kDefaultMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kDefaultMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, InPlaceAsyncCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
negate1 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kAlternateMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, InPlaceAsyncCollectivePermuteSameBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate0, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kAlternateMemorySpace);
EXPECT_TRUE(collective_permute_start->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest,
InPlaceAsyncCollectivePermuteSameBufferChained) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start.1 = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate0, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = bf16[4]{0} collective-permute-done(collective-permute-start.1)
collective-permute-start.2 = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(collective-permute-done.1, collective-permute-done.1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
collective-permute-done.2 = bf16[4]{0} collective-permute-done(collective-permute-start.2)
ROOT add = add(collective-permute-done.2, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start_1 =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start.1");
EXPECT_TRUE(collective_permute_start_1->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kAlternateMemorySpace);
EXPECT_TRUE(collective_permute_start_1->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
HloInstruction* collective_permute_start_2 =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start.2");
EXPECT_TRUE(collective_permute_start_2->shape()
.tuple_shapes(0)
.layout()
.memory_space() == kAlternateMemorySpace);
EXPECT_TRUE(collective_permute_start_2->shape()
.tuple_shapes(1)
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBufferChained) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple2, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
collective-permute-start.2 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(collective-permute-done.1, collective-permute-done.1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
collective-permute-done.2 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.2)
gte = bf16[48]{0} get-tuple-element(collective-permute-done.2), index=0
ROOT root = (bf16[48]{0}, bf16[4]{0}) tuple(gte, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
const HloInstruction* cp_done2 =
FindInstruction(module.get(), "collective-permute-done.2");
EXPECT_EQ(cp_done2->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.1)
tuple3 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.2, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple3, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
gte = bf16[48]{0} get-tuple-element(collective-permute-done.1), index=0
ROOT root = (bf16[48]{0}, bf16[4]{0}) tuple(gte, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBufferRoot) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.1)
tuple3 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.2, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple3, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
ROOT collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
ShapeUtil::ForEachSubshape(
cp_done1->shape(),
[&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.IsArray() && subshape.has_layout()) {
EXPECT_EQ(subshape.layout().memory_space(), kDefaultMemorySpace);
}
});
}
TEST_F(MemorySpaceAssignmentTest, TupleInPlaceAsyncCollectivePermuteRoot) {
absl::string_view hlo_string = R"(
HloModule inplace_collective_permute, is_scheduled=true
ENTRY %inplace_collective_permute {
%param.0 = u32[8,1,1] parameter(0)
%constant.1000 = u32[] constant(1000)
%broadcast.1 = u32[8,1,1] broadcast(u32[] %constant.1000), dimensions={}
%broadcast.2 = u32[8,1,1] broadcast(u32[] %constant.1000), dimensions={}
%tuple.input = (u32[8,1,1], u32[8,1,1]) tuple(u32[8,1,1] %param.0, u32[8,1,1] %param.0)
%tuple.output = (u32[8,1,1], u32[8,1,1]) tuple(u32[8,1,1] %broadcast.1, u32[8,1,1] %broadcast.2)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
%constant.2 = s32[] constant(2)
%indices.0.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.0, s32[] %constant.0, s32[] %constant.0)
%indices.1.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.1, s32[] %constant.0, s32[] %constant.0)
%indices.2.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.2, s32[] %constant.0, s32[] %constant.0)
%indices.000.100 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.1.0.0)
%indices.000.200 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.2.0.0)
%indices.000.0 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.0.0.0)
%input.indices = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.100, ((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.0)
%output.indices = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.100, ((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.200)
%collective-permute-start = ((u32[8,1,1], u32[8,1,1]), (u32[8,1,1], u32[8,1,1]), u32[], u32[]) collective-permute-start((u32[8,1,1], u32[8,1,1]) %tuple.input, (u32[8,1,1], u32[8,1,1]) %tuple.output, (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) %input.indices, (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) %output.indices), channel_id=42, source_target_pairs={{0,1},{1,0},{1,0},{0,1}}, slice_sizes={{4},{4},{4},{4}}
ROOT %collective-permute-done = (u32[8,1,1], u32[8,1,1]) collective-permute-done(((u32[8,1,1], u32[8,1,1]), (u32[8,1,1], u32[8,1,1]), u32[], u32[]) %collective-permute-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done =
FindInstruction(module.get(), "collective-permute-done");
EXPECT_EQ(cp_done->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
ShapeUtil::ForEachSubshape(
cp_done->shape(),
[&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.IsArray() && subshape.has_layout()) {
EXPECT_EQ(subshape.layout().memory_space(), kDefaultMemorySpace);
}
});
}
TEST_F(MemorySpaceAssignmentTest, ReservedScopedMemory) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT f = f32[2,4] add(e, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
if (instruction->name() == "c") {
return 100;
}
return 0;
};
AssignMemorySpace(module.get(), options);
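// Reserving 100 bytes of scoped memory at "c" is expected to push "b" and
// "c" to default memory, while "a", "d", and "e" still fit in alternate
// memory.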
auto get_memory_space = [&](absl::string_view instruction_name) {
return module->entry_computation()
->GetInstructionWithName(instruction_name)
->shape()
.layout()
.memory_space();
};
EXPECT_TRUE(get_memory_space("a") == kAlternateMemorySpace);
EXPECT_TRUE(get_memory_space("b") == kDefaultMemorySpace);
EXPECT_TRUE(get_memory_space("c") == kDefaultMemorySpace);
EXPECT_TRUE(get_memory_space("d") == kAlternateMemorySpace);
EXPECT_TRUE(get_memory_space("e") == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, ConstantAllocationFar) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
const = f32[2,4] constant({...})
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT negate = f32[2,4] add(const, e)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_TRUE(module->entry_computation()
->GetInstructionWithName("const")
->shape()
.layout()
.memory_space() == kDefaultMemorySpace);
EXPECT_TRUE(module->entry_computation()
->GetInstructionWithName("negate")
->operand(0)
->shape()
.layout()
.memory_space() == kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, ConstantAllocationNear) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
const = f32[2,4] constant({...})
ROOT negate = f32[2,4] add(const, e)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_TRUE(module->entry_computation()
->GetInstructionWithName("const")
->shape()
.layout()
.memory_space() == kDefaultMemorySpace);
EXPECT_TRUE(module->entry_computation()
->GetInstructionWithName("negate")
->operand(0)
->shape()
.layout()
.memory_space() == kAlternateMemorySpace);
}
class FakeMemorySpaceAssignmentRepacker : public MemorySpaceAssignmentRepacker {
public:
explicit FakeMemorySpaceAssignmentRepacker(
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t>& repack_map,
std::function<void(absl::Span<AllocationBlock*>)> check_fun = nullptr,
bool always_return_modified = false)
: MemorySpaceAssignmentRepacker(128, 8),
repack_map_(repack_map),
check_fun_(check_fun),
always_return_modified_(always_return_modified) {}
absl::StatusOr<bool> Repack(
absl::Span<AllocationBlock*> allocations) override {
bool modified = false;
for (AllocationBlock* block : allocations) {
absl::flat_hash_set<int64_t> colocations;
std::string colocations_str;
for (const AllocationBlock* colocation : block->GetColocations()) {
absl::StrAppend(&colocations_str, colocation->id, ", ");
colocations.insert(colocation->id);
}
VLOG(1) << "Alloc id: " << block->id << " time: ["
<< block->inclusive_start_time << ", " << block->end_time
<< "] size: " << block->size
<< " init offset: " << block->initial_offset << " colocations: {"
<< colocations_str << "}";
auto it = repack_map_.find(
{block->inclusive_start_time, block->initial_offset});
if (it != repack_map_.end()) {
modified = true;
block->offset = it->second;
} else {
block->offset = block->initial_offset;
}
for (AllocationBlock* colocation : block->GetColocations()) {
if (it != repack_map_.end()) {
colocation->offset = it->second;
} else {
colocation->offset = colocation->initial_offset;
}
}
}
if (check_fun_) {
check_fun_(allocations);
}
return always_return_modified_ || modified;
}
private:
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map_;
std::function<void(absl::Span<AllocationBlock*>)> check_fun_;
bool always_return_modified_;
};
TEST_F(MemorySpaceAssignmentTest, Repack) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(p, q, o)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
repack_map[{2, 0}] = 32;
repack_map[{3, 32}] = 0;
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.repacker = &repacker;
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
const HloInstruction* d =
module->entry_computation()->GetInstructionWithName("d");
EXPECT_EQ(d->shape().layout().memory_space(), kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, RepackExportsAliasedOffsets) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition {
param1 = (f32[2,4], f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body {
param2 = (f32[2,4], f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
gte3 = f32[2,4] get-tuple-element(param2), index=1
add = f32[2,4] add(gte2, gte3)
ROOT tuple2 = (f32[2,4], f32[2,4]) tuple(add, gte3)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] sine(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
f = f32[2,4] negate(e)
g = f32[2,4] negate(f)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[2,4] negate(n)
p = f32[2,4] negate(o)
q = f32[2,4] add(p, a)
tuple = (f32[2,4], f32[2,4]) tuple(q, a)
while = (f32[2,4], f32[2,4]) while(tuple), condition=while_condition, body=while_body
gte0 = f32[2,4] get-tuple-element(while), index=0
gte1 = f32[2,4] get-tuple-element(while), index=1
r = f32[2,4] negate(gte0)
s = f32[2,4] negate(r)
t = f32[2,4] negate(s)
constant = f32[] constant(0)
broadcast = f32[8,4] broadcast(constant), dimensions={}
cos = f32[8,4] cosine(broadcast)
u = f32[2,4] add(t, gte1)
v = f32[2,4] add(u, param0)
w = f32[8,4] negate(cos)
ROOT tuple3 = (f32[2,4], f32[8,4]) tuple(v, w)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
auto check_fun = [](absl::Span<AllocationBlock*> allocations) {
EXPECT_TRUE(allocations.at(0)->GetColocationsCount() == 1 ||
allocations.at(0)->GetColocationsCount() == 3);
EXPECT_EQ(allocations.at(1)->GetColocationsCount(), 3);
EXPECT_EQ(allocations.at(2)->GetColocationsCount(), 3);
EXPECT_TRUE(allocations.at(3)->GetColocationsCount() == 1 ||
allocations.at(3)->GetColocationsCount() == 3);
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.repacker = &repacker;
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
TEST_F(MemorySpaceAssignmentTest,
RepackExportsAliasedOffsetsForReservedScopedMemory) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT f = f32[2,4] add(e, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
if (instruction->name() == "c" || instruction->name() == "d") {
return 100;
}
return 0;
};
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
bool repacker_ran = false;
auto check_fun = [&](absl::Span<AllocationBlock*> allocations) {
EXPECT_EQ(allocations.at(0)->GetColocationsCount(), 2);
EXPECT_EQ(allocations.at(1)->GetColocationsCount(), 2);
repacker_ran = true;
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
options.repacker = &repacker;
AssignMemorySpace(module.get(), options);
EXPECT_TRUE(repacker_ran);
}
TEST_F(MemorySpaceAssignmentTest, ReduceReservedScopedVmemIfOperandInVmem) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3], f32[8,3]) tuple(p, q, o, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 10;
options.repack_after_every_allocation = true;
options.reduce_scoped_memory_limit = true;
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
int64_t scoped_memory_size = 0;
if (operands_in_alternate_memory.empty()) {
scoped_memory_size += 1;
LOG(INFO) << instruction->name() << " has no operand in vmem";
}
if (outputs_in_alternate_memory.empty()) {
scoped_memory_size += 2;
LOG(INFO) << instruction->name() << " has no output in vmem";
}
return scoped_memory_size;
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, nullptr);
options.repacker = &repacker;
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpace(module.get(), options);
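// With reduce_scoped_memory_limit enabled, each instruction's reserved
// scoped allocation is expected to equal 1 if none of its operands are in
// alternate memory plus 2 if its output is not in alternate memory,
// matching the custom reserved_scoped_memory_fn above.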
auto instruction_consumes_assignment_fn =
[&](absl::string_view instruction_name) -> bool {
HloInstruction* instruction =
module->entry_computation()->GetInstructionWithName(instruction_name);
for (auto& pair : assignments->chunks()) {
HloInstruction* consumer = pair.first.instruction;
if (absl::c_any_of(instruction->operands(),
[&](const HloInstruction* operand) {
return operand == consumer;
})) {
return true;
}
}
return false;
};
auto instruction_produces_assignment_fn =
[&](absl::string_view instruction_name) -> bool {
HloInstruction* instruction =
module->entry_computation()->GetInstructionWithName(instruction_name);
for (auto& pair : assignments->chunks()) {
HloInstruction* producer = pair.first.instruction;
if (producer == instruction) {
return true;
}
}
return false;
};
auto check_reserved_scoped_memory_fn =
[&](absl::string_view instruction_name) -> bool {
int64_t scoped_memory_size = -1;
for (auto& pair : assignments->scoped_allocation_chunks()) {
HloInstruction* instruction = pair.first;
if (instruction->name() == instruction_name) {
scoped_memory_size = pair.second.size;
}
}
if (!instruction_consumes_assignment_fn(instruction_name)) {
scoped_memory_size -= 1;
}
if (!instruction_produces_assignment_fn(instruction_name)) {
scoped_memory_size -= 2;
}
return scoped_memory_size == 0;
};
for (auto& pair : assignments->assignment_informations()) {
LOG(INFO) << " space: " << pair.first << ", size: " << pair.second.size;
}
for (auto& pair : assignments->scoped_allocation_chunks()) {
HloInstruction* instruction = pair.first;
LOG(INFO) << instruction->name() << ": " << pair.second.size;
}
EXPECT_TRUE(check_reserved_scoped_memory_fn("a"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("b"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("c"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("j"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("d"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("k"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("l"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("m"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("n"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("o"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("p"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("q"));
}
TEST_F(MemorySpaceAssignmentTest, ScopedAllocationWithDifferentOffset) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(p, q, o)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto check_fun = [](absl::Span<AllocationBlock*> allocations) {
for (AllocationBlock* block : allocations) {
if (block->inclusive_start_time == block->end_time) {
EXPECT_GT(block->GetColocationsCount(), 0);
}
}
};
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
Options options = DefaultMemorySpaceOptions();
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
return 1;
};
options.max_repacks = 1;
options.repacker = &repacker;
options.allocate_reserved_scoped_memory_at_same_offset = false;
AssignMemorySpace(module.get(), options);
}
TEST_F(MemorySpaceAssignmentTest,
RepackShouldntEraseRequiredAssignmentForConditionalOutput) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg1 = f32[3] negate(gte)
ROOT tuple1 = (f32[3]) tuple(neg1)
}
false_computation {
p0 = (f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg2 = f32[3] negate(gte)
ROOT tuple2 = (f32[3]) tuple(neg2)
}
ENTRY entry {
p0 = f32[3] parameter(0)
p1 = pred[] parameter(1)
copy = f32[3] copy(p0)
tuple = (f32[3]) tuple(copy)
conditional = (f32[3]) conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
ROOT gte = f32[3] get-tuple-element(conditional), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
FakeMemorySpaceAssignmentRepacker repacker =
      FakeMemorySpaceAssignmentRepacker(repack_map, nullptr, true);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 10;
options.repacker = &repacker;
options.repack_after_every_allocation = true;
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
  AssignMemorySpace(module.get(), options, {}, &prefetch_interval_picker);
}
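// Memory space assignment should be deterministic: running it on identical
// copies of the same module must produce byte-identical module strings.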
TEST_F(MemorySpaceAssignmentTest, Determinism) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get());
std::string module_str = module->ToString();
for (int i = 0; i < 10; ++i) {
std::unique_ptr<HloModule> other_module = CreateEvictAndPrefetchModule();
AssignMemorySpace(other_module.get());
EXPECT_EQ(module_str, other_module->ToString());
}
}
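// An in-place op (dynamic-update-slice fusion) must reuse the alternate-memory
// offset of the buffer it updates: the fusion and its negate operand are
// expected to share the same (valid) offset.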
TEST_F(MemorySpaceAssignmentTest, InPlaceOp) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY main {
param = f32[2,3] parameter(0)
negate = f32[2,3] negate(param)
fusion = f32[2,3] fusion(negate), kind=kLoop, calls=fused_computation
ROOT add = f32[2,3] add(fusion, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(module.get());
HloInstruction* negate_instruction =
module->entry_computation()->GetInstructionWithName("negate");
int64_t negate_offset =
GetAlternateMemoryOffset(*preset_assignments, negate_instruction);
HloInstruction* fusion_instruction =
module->entry_computation()->GetInstructionWithName("fusion");
int64_t fusion_offset =
GetAlternateMemoryOffset(*preset_assignments, fusion_instruction);
EXPECT_EQ(negate_offset, fusion_offset);
EXPECT_NE(negate_offset, -1);
}
TEST_F(MemorySpaceAssignmentTest, ConditionalInPlaceOp) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
true_computation {
p0 = (f32[2,3]) parameter(0)
gte = f32[2,3] get-tuple-element(p0), index=0
ROOT neg1 = f32[2,3] negate(gte)
}
false_computation {
p0 = (f32[2,3]) parameter(0)
gte = f32[2,3] get-tuple-element(p0), index=0
neg2 = f32[2,3] negate(gte)
ROOT fusion = f32[2,3] fusion(neg2), kind=kLoop, calls=fused_computation
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = pred[] parameter(1)
copy = f32[2,3] copy(p0)
tuple = (f32[2,3]) tuple(copy)
ROOT conditional = f32[2,3] conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
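// When alternate memory is disallowed for async-start/done and for
// non-main-thread computations, neither the async instructions nor the
// instructions in the called computation may receive alternate-memory
// allocations; prefetch/eviction copies are still expected around them.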
TEST_F(MemorySpaceAssignmentTest, AsyncCallDisableAlternateMem) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
called_comp {
p0 = f32[2,3] parameter(0)
negate10 = f32[2,3] negate(p0)
negate11 = f32[2,3] negate(negate10)
negate12 = f32[2,3] negate(negate11)
negate13 = f32[2,3] negate(negate12)
negate14 = f32[2,3] negate(negate13)
ROOT negate15 = f32[2,3] negate(negate14)
}, execution_thread="foobar"
async_comp {
p0 = f32[2,3] parameter(0)
ROOT call = f32[2,3] call(p0), to_apply=called_comp
}, execution_thread="foobar"
ENTRY entry {
p0 = f32[2,3] parameter(0)
negate0 = f32[2,3] negate(p0)
negate1 = f32[2,3] negate(negate0)
negate2 = f32[2,3] negate(negate1)
negate3 = f32[2,3] negate(negate2)
negate4 = f32[2,3] negate(negate3)
async-start = ((f32[2,3]), f32[2,3], f32[2]) async-start(negate1), async_execution_thread="foobar", calls=async_comp
async-done = f32[2,3] async-done(async-start), async_execution_thread="foobar", calls=async_comp
add0 = f32[2,3] add(negate0, async-done)
negate5 = f32[2,3] negate(add0)
negate6 = f32[2,3] negate(negate5)
negate7 = f32[2,3] negate(negate6)
negate8 = f32[2,3] negate(negate7)
negate9 = f32[2,3] negate(negate8)
ROOT add1 = f32[2,3] add(negate9, async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kAsyncStart &&
use.instruction->opcode() != HloOpcode::kAsyncDone &&
use.instruction->parent()->IsMainThread();
};
options.is_position_allowed_in_alternate_mem_fn = [](const HloPosition& pos) {
return pos.instruction->opcode() != HloOpcode::kAsyncStart &&
pos.instruction->opcode() != HloOpcode::kAsyncDone &&
pos.instruction->parent()->IsMainThread();
};
AssignMemorySpace(module.get(), options);
auto has_alternate_memory_allocation =
[&](const HloInstruction* instruction) {
bool result = false;
auto shape_has_alternate_memory_allocation =
            [&](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsArray() &&
subshape.layout().memory_space() == kAlternateMemorySpace) {
result = true;
}
};
ShapeUtil::ForEachSubshape(instruction->shape(),
shape_has_alternate_memory_allocation);
for (const HloInstruction* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(operand->shape(),
shape_has_alternate_memory_allocation);
}
return result;
};
const HloInstruction* async_start =
FindInstruction(module.get(), "async-start");
const HloInstruction* async_done =
FindInstruction(module.get(), "async-done");
EXPECT_FALSE(has_alternate_memory_allocation(async_start));
EXPECT_FALSE(has_alternate_memory_allocation(async_done));
for (const HloInstruction* instruction :
async_start->async_wrapped_instruction()
->called_computations()[0]
->instructions()) {
EXPECT_FALSE(has_alternate_memory_allocation(instruction));
}
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Negate(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncDone())));
EXPECT_THAT(async_start,
op::AsyncStart(op::AsyncCopy(
kDefaultMemorySpace, kAlternateMemorySpace, op::Negate())));
}
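// With inefficient_use_to_copy_ratio == 0 the fusion's parameter is prefetched
// and its output evicted; raising the ratio to 0.5 marks that allocation as
// inefficient, so the fusion reads the parameter from default memory instead.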
TEST_F(MemorySpaceAssignmentTest, InefficientAllocation) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[2,3] parameter(2)
neg0 = f32[2,3] negate(p2)
neg1 = f32[2,3] negate(neg0)
neg2 = f32[2,3] negate(neg1)
neg3 = f32[2,3] negate(neg2)
neg4 = f32[2,3] negate(neg3)
neg5 = f32[2,3] negate(neg4)
neg6 = f32[2,3] negate(neg5)
neg7 = f32[2,3] negate(neg6)
fusion = f32[2,3] fusion(p0), kind=kLoop, calls=fused_computation
neg8 = f32[2,3] negate(neg7)
neg9 = f32[2,3] negate(neg8)
neg10 = f32[2,3] negate(neg9)
neg11 = f32[2,3] negate(neg10)
neg12 = f32[2,3] negate(neg11)
neg13 = f32[2,3] negate(neg12)
neg14 = f32[2,3] negate(neg13)
neg15 = f32[2,3] negate(neg14)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(fusion, neg15)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.0;
  AssignMemorySpaceUsingCostAnalysis(module.get(), options);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::Fusion(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter()))),
op::Negate()));
TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(hlo_string));
options.inefficient_use_to_copy_ratio = 0.5;
  AssignMemorySpaceUsingCostAnalysis(module.get(), options);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Fusion(op::Parameter()), op::Negate()));
}
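// Regression test for a livelock involving the inefficient-allocation check
// (see the test name); it passes as long as memory space assignment
// terminates.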
TEST_F(MemorySpaceAssignmentTest, InefficientAllocationLivelockBug) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_1 {
param0 = f32[5,4] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[5,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[5,4] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
fused_computation_2 {
param0 = f32[5,4] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[5,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[5,4] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY entry {
p0 = f32[5,4] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[2,3] parameter(2)
neg0 = f32[2,3] negate(p2)
neg1 = f32[2,3] negate(neg0)
neg2 = f32[2,3] negate(neg1)
neg3 = f32[2,3] negate(neg2)
neg4 = f32[2,3] negate(neg3)
neg5 = f32[2,3] negate(neg4)
neg6 = f32[2,3] negate(neg5)
neg7 = f32[2,3] negate(neg6)
fusion.1 = f32[5,4] fusion(p0), kind=kLoop, calls=fused_computation_1
tanh = f32[2,3] tanh(neg7)
fusion.2 = f32[5,4] fusion(fusion.1), kind=kLoop, calls=fused_computation_2
neg8 = f32[2,3] negate(tanh)
neg9 = f32[2,3] negate(neg8)
neg10 = f32[2,3] negate(neg0)
neg11 = f32[2,3] negate(neg10)
neg12 = f32[2,3] negate(neg11)
ROOT tuple = (f32[5,4], f32[2,3]) tuple(fusion.2, neg12)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.5;
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
hlo_cost_options.set_transcendentals_per_second(0.4);
  AssignMemorySpaceUsingCostAnalysis(module.get(), options, std::nullopt,
                                     hlo_cost_options);
}
TEST_F(MemorySpaceAssignmentTest,
CalledComputationInefficientAllocationLiveLockBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3], f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg1 = f32[3] negate(gte)
ROOT tuple1 = (f32[3]) tuple(neg1)
}
false_computation {
p0 = (f32[3], f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg2 = f32[3] negate(gte)
ROOT tuple2 = (f32[3]) tuple(neg2)
}
ENTRY entry {
p0 = f32[3] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[3] parameter(2)
copy0 = f32[3] copy(p0)
negate0 = f32[3] negate(p0)
negate1 = f32[3] negate(negate0)
negate2 = f32[3] negate(negate1)
negate3 = f32[3] negate(negate2)
negate4 = f32[3] negate(negate3)
negate5 = f32[3] negate(negate4)
negate6 = f32[3] negate(negate5)
negate7 = f32[3] negate(negate6)
negate8 = f32[3] negate(negate7)
tuple = (f32[3], f32[3]) tuple(copy0, p2)
conditional = (f32[3]) conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
gte = f32[3] get-tuple-element(conditional), index=0
ROOT add = f32[3] add(gte, negate8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.5;
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
hlo_cost_options.set_transcendentals_per_second(0.4);
  AssignMemorySpaceUsingCostAnalysis(module.get(), options, std::nullopt,
                                     hlo_cost_options);
}
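// negate1 is expected to read param1 directly, i.e. no prefetch is inserted
// for it around the long-running collective-permute.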
TEST_F(MemorySpaceAssignmentTest, AsyncOpElapsedTime) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = bf16[16]{0} parameter(0)
param1 = bf16[4]{0} parameter(1)
collective-permute-start = (bf16[16]{0}, bf16[16]{0}, u32[], u32[]) collective-permute-start(param0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param1)
collective-permute-done = bf16[16]{0} collective-permute-done(collective-permute-start)
ROOT negate2 = bf16[4]{0} negate(negate1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(FindInstruction(module.get(), "negate1")->operand(0),
op::Parameter(1));
}
TEST_F(MemorySpaceAssignmentTest, AliasedOperandBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[4,4]{0,1} parameter(0)
param1 = f32[4]{0} parameter(1)
param2 = f32[4,4]{0,1} parameter(2)
negate0 = f32[4]{0} negate(param1)
negate1 = f32[4]{0} negate(negate0)
negate2 = f32[4]{0} negate(negate1)
negate3 = f32[4]{0} negate(negate2)
negate4 = f32[4]{0} negate(negate3)
negate5 = f32[4]{0} negate(negate4)
custom_call1 = f32[4,4]{0,1} custom-call(param0), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
tanh = f32[4,4]{0,1} tanh(param2)
negate6 = f32[4]{0} negate(negate5)
negate7 = f32[4]{0} negate(negate6)
negate8 = f32[4]{0} negate(negate7)
negate9 = f32[4]{0} negate(negate8)
negate10 = f32[4]{0} negate(negate9)
negate11 = f32[4]{0} negate(negate10)
negate12 = f32[4]{0} negate(negate11)
negate13 = f32[4]{0} negate(negate12)
negate14 = f32[4]{0} negate(negate13)
negate15 = f32[4]{0} negate(negate14)
negate16 = f32[4]{0} negate(negate15)
custom_call2 = f32[4,4]{0,1} custom-call(custom_call1), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
custom_call3 = f32[4,4]{0,1} custom-call(param0, custom_call2), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
ROOT root = f32[4,4]{0,1} add(tanh, custom_call2)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_inst_priority = [](const HloInstruction* instruction) {
if (instruction->name() == "param2") {
return 0;
}
if (instruction->name() == "param0") {
return 1;
}
return 2;
};
return get_inst_priority(a.buffer->defining_instruction()) <
get_inst_priority(b.buffer->defining_instruction());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
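// Custom async-op fusion pattern (start/update/done) whose aliased buffers
// require contiguous allocations: with a short live range, the checks below
// expect the aliased tuple elements and operands to land in alternate memory.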
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionShortLiveRange) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate4, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate5 = f32[2,1] negate(gte4)
gte5 = f32[2,1] get-tuple-element(fusion2), index=1
gte6 = f32[2,1] get-tuple-element(fusion2), index=2
gte7 = u32[] get-tuple-element(fusion2), index=4
gte8 = u32[] get-tuple-element(fusion2), index=5
fusion3 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate5, fusion3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
HloInstruction* fusion1 =
module->entry_computation()->GetInstructionWithName("fusion1");
HloInstruction* fusion2 =
module->entry_computation()->GetInstructionWithName("fusion2");
HloInstruction* fusion3 =
module->entry_computation()->GetInstructionWithName("fusion3");
EXPECT_THAT(fusion2->operand(2), op::GetTupleElement(fusion1, 0));
EXPECT_THAT(fusion2->operand(3), op::GetTupleElement(fusion1, 1));
EXPECT_THAT(fusion3->operand(0), op::GetTupleElement(fusion2, 1));
EXPECT_THAT(fusion3->operand(1), op::GetTupleElement(fusion2, 2));
EXPECT_THAT(fusion2->operand(2)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(3)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion3->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion3->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(
ShapeUtil::GetSubshape(fusion2->shape(), {0}).layout().memory_space(),
kAlternateMemorySpace);
}
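// Same custom async-op fusion pattern as above, but with a long live range
// between fusion1 and fusion2: the aliased (contiguous) buffers are expected
// to stay in default memory, while the non-aliased operands and the {0} output
// of fusion2 still use alternate memory.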
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionLongLiveRange) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
negate5 = f32[2,1] negate(negate4)
negate6 = f32[2,1] negate(negate5)
negate7 = f32[2,1] negate(negate6)
negate8 = f32[2,1] negate(negate7)
negate9 = f32[2,1] negate(negate8)
negate10 = f32[2,1] negate(negate9)
negate11 = f32[2,1] negate(negate10)
negate12 = f32[2,1] negate(negate11)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate12, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate13 = f32[2,1] negate(gte4)
gte5 = f32[2,1] get-tuple-element(fusion2), index=1
gte6 = f32[2,1] get-tuple-element(fusion2), index=2
gte7 = u32[] get-tuple-element(fusion2), index=4
gte8 = u32[] get-tuple-element(fusion2), index=5
fusion3 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate13, fusion3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
HloInstruction* fusion1 =
module->entry_computation()->GetInstructionWithName("fusion1");
HloInstruction* fusion2 =
module->entry_computation()->GetInstructionWithName("fusion2");
HloInstruction* fusion3 =
module->entry_computation()->GetInstructionWithName("fusion3");
EXPECT_THAT(fusion2->operand(2), op::GetTupleElement(fusion1, 0));
EXPECT_THAT(fusion2->operand(2)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion2->operand(3), op::GetTupleElement(fusion1, 1));
EXPECT_THAT(fusion2->operand(3)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion3->operand(0), op::GetTupleElement(fusion2, 1));
EXPECT_THAT(fusion3->operand(0)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion3->operand(1), op::GetTupleElement(fusion2, 2));
EXPECT_THAT(fusion3->operand(1)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion2->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(
ShapeUtil::GetSubshape(fusion2->shape(), {0}).layout().memory_space(),
kAlternateMemorySpace);
}
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionMultipleUsers) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update1 {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_update2 {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate4, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update1
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate5 = f32[2,1] negate(gte4)
negate10 = f32[2,1] negate(negate5)
negate11 = f32[2,1] negate(negate10)
negate12 = f32[2,1] negate(negate11)
negate13 = f32[2,1] negate(negate12)
negate14 = f32[2,1] negate(negate13)
negate15 = f32[2,1] negate(negate14)
negate16 = f32[2,1] negate(negate15)
negate17 = f32[2,1] negate(negate16)
negate18 = f32[2,1] negate(negate17)
negate19 = f32[2,1] negate(negate18)
fusion3 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate19, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update2
gte9 = f32[2,1] get-tuple-element(fusion3), index=0
negate6 = f32[2,1] negate(gte9)
gte5 = f32[2,1] get-tuple-element(fusion3), index=1
gte6 = f32[2,1] get-tuple-element(fusion3), index=2
gte7 = u32[] get-tuple-element(fusion3), index=4
gte8 = u32[] get-tuple-element(fusion3), index=5
fusion4 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate6, fusion4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0})) ||
(inst_name == "fusion3" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
}
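// The cross-program-prefetch copy-start should be hoisted to the front of the
// schedule: only the parameter, its get-tuple-element, and the copy-start
// itself may appear before the copy-start, and the copy-start must carry a
// cross_program_prefetch_index.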
TEST_F(MemorySpaceAssignmentTest, HoistCopyStart) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY cross_program_prefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element.0 = f32[8,8]{1,0} get-tuple-element(p0), index=0
add.0 = f32[8,8]{1,0} add(get-tuple-element.0, get-tuple-element.0)
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot.0 = f32[8,2]{1,0} dot(add.0, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot.0)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.1 = f32[2,2]{1,0} dot(negate.8, get-tuple-element.1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
ASSERT_EQ(cross_program_prefetches.size(), 1);
ASSERT_EQ(cross_program_prefetches[0].parameter, 0);
ASSERT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
for (auto* instruction : module->schedule()
.sequence(module->entry_computation())
.instructions()) {
auto p0 = op::Parameter(0);
auto get_tuple_element_1 = op::GetTupleElement(p0, 1);
auto copy_start = op::CopyStart(get_tuple_element_1);
EXPECT_THAT(instruction, AnyOf(p0, get_tuple_element_1, copy_start));
if (::testing::Matches(copy_start)(instruction)) {
EXPECT_TRUE(instruction->cross_program_prefetch_index().has_value());
break;
}
}
}
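// Window prefetching: with enable_window_prefetch and a detail function that
// requests size-32 windows for the fusion's operands, the fusion gains extra
// operands that are async-done results of "WindowPrefetch" custom calls.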
TEST_F(MemorySpaceAssignmentTest, WindowPrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation {
%p0 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(1)
%p2 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(2)
%add0 = bf16[64,8]{1,0:T(8,128)(2,1)} add(%p0, %p1)
ROOT %add1 = bf16[64,8]{1,0:T(8,128)(2,1)} add(%add0, %p2)
}
entry {
%p0 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(1)
%p2 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(2)
ROOT fusion = bf16[64,8]{1,0:T(8,128)(2,1)} fusion(bf16[64,8]{1,0:T(8,128)(2,1)} %p0, bf16[64,8]{1,0:T(8,128)(2,1)} %p1, bf16[64,8]{1,0:T(8,128)(2,1)} %p2), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto window_prefetch_detail_fn = [&](const HloInstruction* instruction) {
WindowPrefetchDetail window_prefetch_detail;
const HloInstruction* fusion = FindInstruction(module.get(), "fusion");
if (instruction == fusion) {
for (int i = 0; i < 3; ++i) {
auto* operand = window_prefetch_detail.add_windows();
operand->set_operand(i);
operand->set_size(32);
}
}
return window_prefetch_detail;
};
Options options = DefaultMemorySpaceOptions();
options.enable_window_prefetch = true;
options.window_prefetch_detail_fn = window_prefetch_detail_fn;
  AssignMemorySpace(module.get(), options, 10, 0);
const HloInstruction* fusion = FindInstruction(module.get(), "fusion");
EXPECT_EQ(fusion->operand_count(), 5);
for (int i = 3; i < 5; i++) {
const HloInstruction* async_done = fusion->operand(i);
EXPECT_EQ(async_done->opcode(), HloOpcode::kAsyncDone);
EXPECT_EQ(async_done->operand_count(), 1);
EXPECT_TRUE(async_done->async_wrapped_instruction()->IsCustomCall(
"WindowPrefetch"));
const HloInstruction* async_start = async_done->operand(0);
EXPECT_EQ(async_start->opcode(), HloOpcode::kAsyncStart);
EXPECT_EQ(async_start->operand_count(), 1);
EXPECT_TRUE(async_start->async_wrapped_instruction()->IsCustomCall(
"WindowPrefetch"));
}
VLOG(2) << "module: " << module->ToString();
}
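// Unit tests for AsynchronousCopyOrdering, the helper used to check whether
// issuing an async copy over a given (start, end) interval would violate the
// ordering of already-scheduled copies.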
using AsynchronousCopyOrderingTest = ::testing::Test;
TEST_F(AsynchronousCopyOrderingTest, Simple) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyOrdering ordering;
EXPECT_FALSE(ordering.ViolatesOrdering(3, 11));
ordering.AddCopy({3, 11, 1, alternate_mem_space, 0});
EXPECT_FALSE(ordering.ViolatesOrdering(1, 8));
ordering.AddCopy({1, 8, 1, alternate_mem_space, 1});
EXPECT_FALSE(ordering.ViolatesOrdering(5, 14));
ordering.AddCopy({5, 14, 1, alternate_mem_space, 2});
EXPECT_FALSE(ordering.ViolatesOrdering(7, 14));
ordering.AddCopy({7, 14, 1, alternate_mem_space, 3});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 16));
EXPECT_TRUE(ordering.ViolatesOrdering(9, 12));
EXPECT_TRUE(ordering.ViolatesOrdering(6, 17));
EXPECT_FALSE(ordering.ViolatesOrdering(5, 13));
ordering.AddCopy({5, 13, 1, alternate_mem_space, 4});
EXPECT_FALSE(ordering.ViolatesOrdering(5, 14));
ordering.AddCopy({5, 14, 1, alternate_mem_space, 5});
}
TEST_F(AsynchronousCopyOrderingTest, SameInterval) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyOrdering ordering;
EXPECT_FALSE(ordering.ViolatesOrdering(1, 5));
EXPECT_FALSE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 0});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 1});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 2});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 1});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 2});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 0});
EXPECT_FALSE(ordering.ViolatesOrdering(2, 4));
}
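// Unit tests for AsynchronousCopyResource, which tracks per-time-slot copy
// resource: HasEnoughResource checks whether a copy fits, AddCopy consumes
// resource (propagating any deficit into later slots), and RemoveCopy
// restores it.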
using AsynchronousCopyResourceTest = ::testing::Test;
TEST_F(AsynchronousCopyResourceTest, Simple) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 3.0, 1.0, 6.0, 7.0, 1.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 4, 4.0));
resource.AddCopy({1, 4, 4.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 10.0));
resource.AddCopy({5, 9, 10.0, alternate_mem_space, 2});
EXPECT_FALSE(resource.HasEnoughResource(4, 9, 3.0));
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 3});
}
TEST_F(AsynchronousCopyResourceTest, Propagate) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(6, 10, 2.0));
resource.AddCopy({6, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 2.0));
resource.AddCopy({5, 9, 2.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 2});
EXPECT_TRUE(resource.HasEnoughResource(3, 7, 2.0));
resource.AddCopy({3, 7, 2.0, alternate_mem_space, 3});
EXPECT_TRUE(resource.HasEnoughResource(2, 6, 2.0));
resource.AddCopy({2, 6, 2.0, alternate_mem_space, 4});
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 2.0));
resource.AddCopy({1, 5, 2.0, alternate_mem_space, 5});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy({0, 4, 3.0, alternate_mem_space, 6});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy({0, 4, 3.0, alternate_mem_space, 7});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 1.0));
}
TEST_F(AsynchronousCopyResourceTest, CantPropagate) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(5, 10, 2.0));
resource.AddCopy({5, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(4, 7, 2.0));
resource.AddCopy({4, 7, 2.0, alternate_mem_space, 1});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 4.0));
resource.AddCopy({4, 8, 4.0, alternate_mem_space, 2});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(3, 6, 4.0));
}
TEST_F(AsynchronousCopyResourceTest, Nested) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
resource.AddCopy({1, 3, 2.0, alternate_mem_space, 0});
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 4.0));
}
TEST_F(AsynchronousCopyResourceTest, Remove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{2, 5, 2.0, alternate_mem_space, 0};
AsynchronousCopy copy2{-1, 2, 3.0, alternate_mem_space, 1};
AsynchronousCopy copy3{0, 4, 4.0, alternate_mem_space, 2};
EXPECT_TRUE(resource.HasEnoughResource(2, 5, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(-1, 2, 3.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 4.0));
resource.AddCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 1.0}));
resource.RemoveCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 0.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 2.0, 2.0}));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, NestedRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{1, 3, 2.0, alternate_mem_space, 0};
AsynchronousCopy copy2{0, 4, 4.0, alternate_mem_space, 1};
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 4.0));
resource.RemoveCopy(copy1);
  auto current_resources = resource.GetCurrentResources();
  EXPECT_EQ(current_resources,
            std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 4.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(1, 3, 2.0));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
}
TEST_F(AsynchronousCopyResourceTest, PropagateRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(6, 10, 2.0));
resource.AddCopy({6, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 2.0));
resource.AddCopy({5, 9, 2.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 2});
EXPECT_TRUE(resource.HasEnoughResource(3, 7, 2.0));
resource.AddCopy({3, 7, 2.0, alternate_mem_space, 3});
EXPECT_TRUE(resource.HasEnoughResource(2, 6, 2.0));
resource.AddCopy({2, 6, 2.0, alternate_mem_space, 4});
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 2.0));
resource.AddCopy({1, 5, 2.0, alternate_mem_space, 5});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
AsynchronousCopy copy1{0, 4, 3.0, alternate_mem_space, 6};
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy(copy1);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 5, 3.0));
AsynchronousCopy copy2{0, 5, 3.0, alternate_mem_space, 7};
resource.AddCopy(copy2);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
resource.RemoveCopy(copy2);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, StartAtZeroAndRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({0.0, 0.0, 1.0, 1.0, 2.0});
AsynchronousCopy copy1{0, 4, 2.0, alternate_mem_space, 0};
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 1.0, 1.0, 2.0}));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, OutOfOrderRemovalSameStartTime) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{1, 3, 1.0, alternate_mem_space, 0};
AsynchronousCopy copy2{1, 4, 2.0, alternate_mem_space, 1};
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 1.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 1.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 4, 2.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
AsynchronousCopy copy3{1, 5, 1.0, alternate_mem_space, 2};
AsynchronousCopy copy4{1, 5, 1.0, alternate_mem_space, 3};
AsynchronousCopy copy5{1, 5, 1.0, alternate_mem_space, 4};
AsynchronousCopy copy6{1, 5, 1.0, alternate_mem_space, 5};
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy4);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy5);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 1.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy6);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0}));
EXPECT_FALSE(resource.HasEnoughResource(1, 5, 1.0));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 2.0}));
resource.RemoveCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy4);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
resource.RemoveCopy(copy5);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 1.0, 2.0, 2.0}));
resource.RemoveCopy(copy6);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, HasEnoughResourceMultiCheckSuccess) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 1.0, 3.0, 6.0, 7.0, 3.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 10, 4.0));
resource.AddCopy({1, 10, 4.0, alternate_mem_space, 1});
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 10, alternate_mem_space);
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(
resource.HasEnoughResourceMultiCheck({{0, 6, 4.0}, {4, 6, 3.0}}));
}
}
TEST_F(AsynchronousCopyResourceTest, HasEnoughResourceMultiCheckFailure) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 1.0, 3.0, 6.0, 7.0, 3.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 10, 4.0));
resource.AddCopy({1, 10, 4.0, alternate_mem_space, 1});
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 10, alternate_mem_space);
EXPECT_FALSE(
resource.HasEnoughResourceMultiCheck({{0, 6, 4.0}, {4, 6, 4.0}}));
}
TEST_F(AsynchronousCopyResourceTest,
HasEnoughResourceMultiCheckRegressionTest) {
auto alternate_mem_space = MemorySpace::kAlternate;
  AsynchronousCopyResource resource(
      {24.0f, 0.0f, 6.0f, 411.0f, 3479.0f, 0.0f, 0.0f, 1537.0f, 3095.0f, 0.0f,
       26.7f});
AsynchronousCopy copy1({1, 8, 170.8f, alternate_mem_space, 1});
AsynchronousCopy copy2({2, 8, 170.8f, alternate_mem_space, 2});
resource.AddCopy(copy1);
resource.AddCopy(copy2);
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 11, alternate_mem_space);
EXPECT_FALSE(
resource.HasEnoughResourceMultiCheck({{0, 4, 170.8}, {1, 4, 170.8}}));
}
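// Cross-program prefetch tests: a dot's weight parameter is expected to be
// prefetched into alternate memory across program executions. The cases that
// follow cover plain and tuple parameters, bitcasts, nested tuples, unused
// parameters, and buffers that are too large to prefetch.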
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))));
}
TEST_F(MemorySpaceAssignmentTest, MultiCrossProgramPrefetchTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kFirstOutput = 4;
constexpr int kSecondOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto first_weight_shape = ShapeUtil::MakeShape(F32, {kFeature, kFirstOutput});
auto second_weight_shape =
ShapeUtil::MakeShape(F32, {kFirstOutput, kSecondOutput});
auto intermediate_shape = ShapeUtil::MakeShape(F32, {kBatch, kFirstOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kSecondOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* first_weight = builder.AddInstruction(
HloInstruction::CreateParameter(1, first_weight_shape, "first_weight"));
HloInstruction* second_weight = builder.AddInstruction(
HloInstruction::CreateParameter(2, second_weight_shape, "second_weight"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto first_dot = builder.AddInstruction(
HloInstruction::CreateDot(intermediate_shape, lhs, first_weight,
dot_dnums, DefaultPrecisionConfig(2)));
auto second_dot = builder.AddInstruction(
HloInstruction::CreateDot(result_shape, first_dot, second_weight,
dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {lhs, first_weight, second_weight, first_dot, second_dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.max_cross_program_prefetches = -1;
options.max_size_in_bytes = 256;
options.alignment_in_bytes = 8;
options.verify = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 2);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
if (cross_program_prefetches.size() > 1) {
EXPECT_EQ(cross_program_prefetches[1].parameter, 2);
EXPECT_EQ(cross_program_prefetches[1].index, ShapeIndex({}));
}
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(op::Dot(op::Parameter(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(2))));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBitcastTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kOutput, kFeature});
auto bitcast_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
auto bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(bitcast_shape, rhs));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, bitcast, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, bitcast, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBitcastTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kOutput, kFeature});
auto bitcast_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
auto bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(bitcast_shape, rhs));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, bitcast, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, bitcast, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchNestedTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
auto tuple_tuple_shape = ShapeUtil::MakeTupleShape({tuple_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_tuple_shape, "p0"));
auto gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple_shape, param, 0));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, gte, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, gte, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, gte, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchUnusedParamTest) {
HloComputation::Builder builder(TestName());
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "p0"));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTooBigTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 8;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
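// Same as above, but with the dot operands packed into a tuple parameter.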
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTooBigTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 8;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
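// The fusion operands here are constants rather than entry parameters, so no
// cross-program prefetch is expected.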
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchFusionTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 2;
constexpr int kFeature = 2;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
{
HloInstruction* lhs = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = fusion_builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
    // Silence the unused-variable warning; the dot becomes the fusion root.
    (void)dot;
}
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto activations = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
auto weights = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
result_shape, HloInstruction::FusionKind::kCustom, {activations, weights},
fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {activations, weights, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchFusionTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 2;
constexpr int kFeature = 2;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
{
HloInstruction* param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = fusion_builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
    // Silence the unused-variable warning; the dot becomes the fusion root.
    (void)dot;
}
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto activations = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
auto weights = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({activations, weights}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
result_shape, HloInstruction::FusionKind::kCustom, {tuple},
fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {activations, weights, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
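// Checks that an operand whose layout already pins it to the alternate memory
// space is not cross-program prefetched.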
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPinnedTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(
F32, {kFeature, kOutput},
{1, 0}, {},
1, 0,
kAlternateMemorySpace);
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPinnedTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(
F32, {kFeature, kOutput},
{1, 0}, {},
1, 0,
kAlternateMemorySpace);
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
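// Checks that a parameter with a may-alias input/output binding, updated in
// place by the root dynamic-update-slice, is not cross-program prefetched.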
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDupMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
ROOT dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::Parameter(0));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDusFusionMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
fused_computation {
fused_p0 = s32[2,2] parameter(0)
fused_p1 = s32[1,2] parameter(1)
fused_p2 = s32[] parameter(2)
fused_p3 = s32[] parameter(3)
ROOT dus = s32[2,2] dynamic-update-slice(fused_p0, fused_p1, fused_p2, fused_p3)
}
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
bitcast1 = s32[2,2] bitcast(p0)
ROOT fusion = s32[2,2] fusion(bitcast1, c0, c1, c1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
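// Checks that a parameter consumed in place by the root dynamic-update-slice
// stays in default memory even without an input/output alias.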
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDup) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
ROOT dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::Parameter(0));
}
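// Checks that the dynamic-update-slice operand can still be cross-program
// prefetched when its result feeds a dot instead of being the root.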
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDupDot) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
ROOT dot = s32[2,2] dot(p1, dup), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(0)));
}
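// Checks that a may-aliased parameter feeding the root dot is not
// cross-program prefetched.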
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDotMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
ROOT dot = s32[2,2] dot(p1, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dot")->operand(1),
op::Parameter(0));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootLiveOutBug) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {0}: (0, {}, may-alias) }
fused_computation {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
slice = s32[1,2] slice(p1), slice={[0:1], [0:2]}
c1 = s32[] constant(0)
ROOT dus = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] slice, s32[] c1, s32[] c1)
}
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
dot = s32[2,2] dot(p1, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
fusion = s32[2,2] fusion(p0, dot), kind=kLoop, calls=fused_computation
ROOT root = (s32[2,2], s32[2,2]) tuple(fusion, dot)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
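// Checks that a parameter whose value flows directly (through a bitcast) to
// the root is not cross-program prefetched.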
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootParameter) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
ROOT bitcast = u32[2,2] bitcast(p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
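// When the prefetched value is not reused near the end of the program, MSA
// should issue an end-of-program prefetch (a second copy-start/copy-done
// pair), and other buffers (the negates) should be able to get allocations at
// offset 0 in the alternate memory.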
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchNoReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = f32[8,8]{1,0} parameter(0)
p1 = f32[8,2]{1,0} parameter(1)
dot = f32[8,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT negate.9 = f32[8,2]{1,0} negate(negate.8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
LOG(ERROR) << "module: " << module->ToString();
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(1), {});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
1);
const HloInstruction* last_instruction =
module->schedule()
.sequence(module->entry_computation())
.instructions()[module->entry_computation()->instruction_count() - 1];
EXPECT_THAT(last_instruction, op::CopyDone());
EXPECT_NE(last_instruction, module->entry_computation()->root_instruction());
bool has_zero_offset_allocations = false;
for (auto pos_and_chunk : preset_assignments->chunks()) {
if (pos_and_chunk.first.instruction->opcode() == HloOpcode::kNegate &&
pos_and_chunk.second.offset == 0) {
has_zero_offset_allocations = true;
}
}
EXPECT_TRUE(has_zero_offset_allocations);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleNoReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element = f32[8,8]{1,0} get-tuple-element(p0), index=0
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot = f32[8,2]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT negate.9 = f32[8,2]{1,0} negate(negate.8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(
module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(0), {1});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
1);
const HloInstruction* last_instruction =
module->schedule()
.sequence(module->entry_computation())
.instructions()[module->entry_computation()->instruction_count() - 1];
EXPECT_THAT(last_instruction, op::CopyDone());
EXPECT_NE(last_instruction, module->entry_computation()->root_instruction());
bool has_zero_offset_allocations = false;
for (auto pos_and_chunk : preset_assignments->chunks()) {
if (pos_and_chunk.first.instruction->opcode() == HloOpcode::kNegate &&
pos_and_chunk.second.offset == 0) {
has_zero_offset_allocations = true;
}
}
EXPECT_TRUE(has_zero_offset_allocations);
}
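// When the prefetched value is reused at the end of the program (dot.2), no
// end-of-program prefetch is needed.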
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = f32[8,8]{1,0} parameter(0)
p1 = f32[8,2]{1,0} parameter(1)
dot = f32[8,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.2 = f32[2,2]{1,0} dot(negate.8, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(1), {});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element = f32[8,8]{1,0} get-tuple-element(p0), index=0
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot = f32[8,2]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.2 = f32[2,2]{1,0} dot(negate.8, get-tuple-element.1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
5, 2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(0), {1});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
0);
}
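// Checks that parameter 0 feeding the custom-call fusion is still prefetched
// into the alternate memory.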
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBufferUnused) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation {
%param_0.2 = f32[32]{0} parameter(0)
%param_1.4 = s32[100]{0} parameter(1)
%custom-call.1 = s32[100]{0} custom-call(s32[100]{0} %param_1.4), custom_call_target="AssumeGatherIndicesInBound", operand_layout_constraints={s32[100]{0}}
%slice.1 = s32[32]{0} slice(s32[100]{0} %custom-call.1), slice={[0:32]}
%reshape.7 = s32[32]{0} reshape(s32[32]{0} %slice.1)
%transpose.5 = s32[32]{0} transpose(s32[32]{0} %reshape.7), dimensions={0}
%gather.1 = f32[32]{0} gather(f32[32]{0} %param_0.2, s32[32]{0} %transpose.5), offset_dims={}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1}
%transpose.4 = f32[32]{0} transpose(f32[32]{0} %gather.1), dimensions={0}
ROOT %reshape.6 = f32[32]{0} reshape(f32[32]{0} %transpose.4)
}
%i.reduce_sub_computation {
%rhs = s32[] parameter(1)
%lhs = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %lhs, s32[] %rhs)
}
%fused_computation.1 {
%constant.4 = s32[] constant(0)
%broadcast.4 = s32[100]{0} broadcast(s32[] %constant.4), dimensions={}
%param_0.4 = s32[32]{0} parameter(0)
%pad.1 = s32[100]{0} pad(s32[32]{0} %param_0.4, s32[] %constant.4), padding=0_68
%constant.3 = s32[] constant(76031)
%broadcast.3 = s32[100]{0} broadcast(s32[] %constant.3), dimensions={}
ROOT %clamp.1 = s32[100]{0} clamp(s32[100]{0} %broadcast.4, s32[100]{0} %pad.1, s32[100]{0} %broadcast.3)
}
ENTRY %main {
%constant = s32[] constant(0)
%i = s32[32,1]{0,1} parameter(1)
%o = f32[32]{0} parameter(0)
%reduce = s32[32]{0} reduce(s32[32,1]{0,1} %i, s32[] %constant), dimensions={1}, to_apply=%i.reduce_sub_computation
%fusion.1 = s32[100]{0} fusion(s32[32]{0} %reduce), kind=kLoop, calls=%fused_computation.1
ROOT %fusion = f32[32]{0} fusion(f32[32]{0} %o, s32[100]{0} %fusion.1), kind=kCustom, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Fusion(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0)),
op::Fusion()));
}
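// With cross_program_prefetch_permissive_mode enabled, the fusion operand is
// accepted as a cross-program prefetch candidate.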
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPermissiveMode) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
fused_computation {
param_0 = f32[2] parameter(0)
param_1 = f32[4,2] parameter(1)
broadcast = f32[4,2] broadcast(param_0), dimensions={1}
ROOT multiply = f32[4,2] multiply(broadcast, param_1)
}
ENTRY entry {
p0 = f32[2] parameter(0)
p1 = f32[4,2] parameter(1)
fusion = f32[4,2] fusion(p0, p1), kind=kLoop, calls=fused_computation
ROOT negate = f32[4,2] negate(fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.cross_program_prefetch_permissive_mode = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
}
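// Verifies that async copy resource accounting staggers the prefetches: with
// the given instruction and copy costs, the lower-priority p1 prefetch should
// start right after c and finish right after d and e, so the copy overlaps
// enough compute to hide its latency.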
TEST_F(MemorySpaceAssignmentTest, CopyResourceIntegration) {
std::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY main {
p0 = s32[8,8] parameter(0)
p1 = s32[8,8] parameter(1)
p2 = s32[] parameter(2)
a = negate(p2)
b = negate(a)
c = add(p0, p0)
d = negate(b)
e = negate(d)
f = add(p1, p1)
ROOT result = tuple(e,c,f)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_size_in_bytes = 300;
HloCostAnalysis::Properties properties;
properties[HloCostAnalysis::kBytesAccessedKey] = kBytesPerSecond;
HloCostAnalysis hlo_cost_analysis(ShapeSize, properties);
CostAnalysisOptions cost_analysis_options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module,
cost_analysis_options));
cost_analysis->SetOverrideForGetInstructionElapsed(
[](const HloInstruction& instruction) -> float { return 10.0; });
cost_analysis->SetOverrideForGetAsyncCopyElapsed(
[](const Shape& shape) -> float { return 20.0; });
options.cost_analysis = cost_analysis.get();
CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
CostAnalysisPrefetchIntervalPicker(
*cost_analysis, 0.8,
1.5,
10.0,
options.max_size_in_bytes));
MsaBufferIntervalCompare compare = [](const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) -> bool {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "p0") {
priority = 0;
} else if (x.buffer->instruction()->name() == "p1") {
priority = 1;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
AssignMemorySpace(module.get(), options, compare, &prefetch_interval_picker);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(_,
op::Add(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0)),
op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0))),
op::Add(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1)),
op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1)))));
const std::vector<HloInstruction*>& schedule =
module->schedule().sequence(module->entry_computation()).instructions();
auto find_schedule_index = [&schedule](std::string_view name) -> int {
for (int i = 0; i < schedule.size(); ++i) {
if (schedule[i]->name() == name) {
return i;
}
}
LOG(FATAL) << "Unable to find index of instruction with name " << name;
};
int c_index = find_schedule_index("c");
int p1_copy_start = find_schedule_index(module->entry_computation()
->root_instruction()
->operand(2)
->operand(0)
->operand(0)
->name());
int d_index = find_schedule_index("d");
int e_index = find_schedule_index("e");
int p1_copy_end = find_schedule_index(module->entry_computation()
->root_instruction()
->operand(2)
->operand(0)
->name());
int f_index = find_schedule_index("f");
EXPECT_EQ(p1_copy_start, c_index + 1);
EXPECT_EQ(d_index, p1_copy_start + 1);
EXPECT_EQ(e_index, d_index + 1);
EXPECT_EQ(p1_copy_end, e_index + 1);
EXPECT_EQ(f_index, p1_copy_end + 1);
}
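// Test fixture for sliced prefetches: prefetches that are split into several
// asynchronous slices which are stitched back together by a concat-bitcast
// custom call.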
class SlicedPrefetchTest : public MemorySpaceAssignmentTestBase {
protected:
enum class InstructionClass {
kUnknown,
kRelatedSliceStart,
kRelatedSliceDone,
kRelatedConcatBitcast,
kStartAfterNonCopy,
kDoneBeforeNonCopy,
kUnrelatedCopyLike,
kUnrelatedNonCopy,
};
static std::string InstructionClassToString(
InstructionClass instruction_class) {
switch (instruction_class) {
case InstructionClass::kUnknown:
return "unknown";
case InstructionClass::kRelatedSliceStart:
return "slice start";
case InstructionClass::kRelatedSliceDone:
return "slice done";
case InstructionClass::kRelatedConcatBitcast:
return "concat-bitcast";
case InstructionClass::kStartAfterNonCopy:
return "start after non-copy";
case InstructionClass::kDoneBeforeNonCopy:
return "done before non-copy";
case InstructionClass::kUnrelatedCopyLike:
return "unrelated copy-like";
case InstructionClass::kUnrelatedNonCopy:
return "unrelated non-copy";
}
}
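  // Thin interface so slice proposals can be mocked in these tests.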
class SliceProposer {
public:
SliceProposer() = default;
virtual ~SliceProposer() = default;
virtual absl::StatusOr<SliceProposalCollection> ProposeSlices(
const Shape& shape, const SlicedPrefetchOptions& options) = 0;
};
class MockSliceProposer : public SliceProposer {
public:
MOCK_METHOD(absl::StatusOr<SliceProposalCollection>, ProposeSlices,
(const Shape& shape, const SlicedPrefetchOptions& options),
(override));
};
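  // Matcher for a sliced asynchronous copy: a concat-bitcast custom call whose
  // operands are async slice start/done pairs with the expected slice
  // parameters, copying between the expected memory spaces.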
class AsyncSlicedCopy
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
AsyncSlicedCopy(int64_t to_space, int64_t from_space,
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order,
::testing::Matcher<const HloInstruction*> operand,
bool expect_bitcasted_io)
: to_space_(to_space),
from_space_(from_space),
expected_slice_params_per_slice_in_spatial_order_(
std::move(expected_slice_params_per_slice_in_spatial_order)),
base_hlo_matcher_(CreateBaseHloMatcher(
operand, expected_slice_params_per_slice_in_spatial_order_.size(),
expect_bitcasted_io)),
expect_bitcasted_io_(expect_bitcasted_io) {}
bool MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override {
if (!base_hlo_matcher_.MatchAndExplain(instruction, listener)) {
return false;
}
if (!MatchMemorySpace(instruction, to_space_, "copy result", listener)) {
return false;
}
const HloInstruction* concat_bitcast =
(expect_bitcasted_io_ ? instruction->operand(0) : instruction);
VLOG(2) << "AsyncSlicedCopy identified the concat-bitcast as "
<< concat_bitcast->name();
const HloInstruction* copy_operand =
concat_bitcast->operand(0)->operand(0)->operand(0);
const HloInstruction* original_copy_operand =
(expect_bitcasted_io_ ? copy_operand->operand(0) : copy_operand);
VLOG(2) << "AsyncSlicedCopy identified the copy operand as "
<< copy_operand->name() << ", and the original copy operand as "
<< original_copy_operand->name();
if (!MatchMemorySpace(original_copy_operand, from_space_, "copy operand",
listener)) {
return false;
}
if (!Shape::Equal().IgnoreMemorySpaceInLayout()(
instruction->shape(), original_copy_operand->shape())) {
*listener << " has a shape of "
<< original_copy_operand->shape().ToString(
true)
<< " before copying but a shape of "
<< instruction->shape().ToString(true)
<< " after copying (ignoring memory space)";
return false;
}
CHECK_EQ(concat_bitcast->operand_count(),
expected_slice_params_per_slice_in_spatial_order_.size());
std::vector<const HloInstruction*> sorted_slices =
SortSlicesInExpectedSpatialOrder(concat_bitcast);
for (int i = 0; i < sorted_slices.size(); ++i) {
const HloInstruction* slice =
sorted_slices[i]->async_wrapped_instruction();
if (!MatchMemorySpace(slice, to_space_, "slice", listener)) {
return false;
}
const std::vector<SliceParam>& expected_slice_params_per_dim =
expected_slice_params_per_slice_in_spatial_order_[i];
if (slice->slice_starts().empty()) {
*listener << " has slice (" << slice->name()
<< "), with no slicing parameters";
return false;
}
if (slice->slice_limits().size() != slice->slice_starts().size() ||
slice->slice_strides().size() != slice->slice_limits().size()) {
        *listener
            << " has slice (" << slice->name()
            << "), with an inconsistent number of slice starts/limits/strides";
return false;
}
      if (slice->slice_starts().size() != copy_operand->shape().rank()) {
        *listener
            << " has slice (" << slice->name() << "), with "
            << slice->slice_starts().size()
            << " slice parameters (i.e., starts/limits/strides), expected "
            << copy_operand->shape().rank();
        return false;
      }
for (int dim = 0; dim < slice->slice_starts().size(); ++dim) {
const SliceParam& expected_slice_params =
expected_slice_params_per_dim[dim];
if (slice->slice_starts()[dim] !=
expected_slice_params.start_inclusive) {
*listener << " has slice (" << slice->name()
<< "), with slice start of " << slice->slice_starts()[dim]
<< " at dim " << dim << ", expected "
<< expected_slice_params.start_inclusive;
return false;
}
if (slice->slice_limits()[dim] !=
expected_slice_params.end_exclusive) {
*listener << " has slice (" << slice->name()
<< "), with slice limit of " << slice->slice_limits()[dim]
<< " at dim " << dim << ", expected "
<< expected_slice_params.end_exclusive;
return false;
}
if (slice->slice_strides()[dim] != 1) {
*listener << " has slice (" << slice->name()
<< "), slice stride of " << slice->slice_strides()[dim]
<< " at dim " << dim << ", expected 1";
return false;
}
}
}
return true;
}
void DescribeTo(std::ostream* os) const override {
base_hlo_matcher_.DescribeTo(os);
std::vector<std::string> slice_parameters_per_operand;
for (int op_idx = 0;
op_idx < expected_slice_params_per_slice_in_spatial_order_.size();
++op_idx) {
std::vector<std::string> slice_params_per_dim;
for (int dim = 0;
dim <
expected_slice_params_per_slice_in_spatial_order_[op_idx].size();
++dim) {
const SliceParam& slice_params =
expected_slice_params_per_slice_in_spatial_order_[op_idx][dim];
slice_params_per_dim.push_back(absl::StrCat(
"dim ", dim, ": {start: ", slice_params.start_inclusive,
", limit: ", slice_params.end_exclusive, "}"));
}
slice_parameters_per_operand.push_back(
absl::StrCat("operand ", op_idx, ": { ",
absl::StrJoin(slice_params_per_dim, ", "), " }"));
}
*os << " (copying from memory space " << from_space_ << " to "
<< to_space_
<< ", with asynchronous slice operands using the following slice "
"parameters: { "
<< absl::StrJoin(slice_parameters_per_operand, ", ") << " })";
}
private:
static ::testing::Matcher<const HloInstruction*> CreateBaseHloMatcher(
::testing::Matcher<const HloInstruction*> operand, int64_t num_slices,
bool expect_bitcasted_io) {
if (expect_bitcasted_io) {
return op::Bitcast(op::CustomCall(
kConcatBitcastCustomCall,
std::vector<::testing::Matcher<const HloInstruction*>>(
num_slices,
op::AsyncDone(op::AsyncStart(op::Bitcast(operand))))));
}
return op::CustomCall(
kConcatBitcastCustomCall,
std::vector<::testing::Matcher<const HloInstruction*>>(
num_slices, op::AsyncDone(op::AsyncStart(operand))));
}
static bool MatchMemorySpace(const HloInstruction* instruction,
int64_t expected_memory_space,
std::string_view error_message_identifier,
::testing::MatchResultListener* listener) {
if (!instruction->shape().has_layout()) {
*listener << " contains " << error_message_identifier << " named "
<< instruction->name()
<< " without a layout, expected a layout with memory space "
<< expected_memory_space;
return false;
}
if (instruction->shape().layout().memory_space() !=
expected_memory_space) {
*listener << " contains " << error_message_identifier << " named "
<< instruction->name() << " in memory space "
<< expected_memory_space << ", expected "
<< expected_memory_space;
return false;
}
return true;
}
int64_t to_space_;
int64_t from_space_;
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order_;
::testing::Matcher<const HloInstruction*> base_hlo_matcher_;
bool expect_bitcasted_io_;
};
static inline ::testing::Matcher<const HloInstruction*> IsAsyncSlicedCopy(
int64_t to_space, int64_t from_space,
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order,
::testing::Matcher<const HloInstruction*> operand_matcher,
bool expect_bitcasted_io = false) {
return ::testing::MakeMatcher(new AsyncSlicedCopy(
to_space, from_space, expected_slice_params_per_slice_in_spatial_order,
operand_matcher, expect_bitcasted_io));
}
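  // Matcher that compares a SlicedPrefetchOptions proto against expected
  // values field by field.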
class SlicedPrefetchOptionsMatcher
: public ::testing::MatcherInterface<const SlicedPrefetchOptions&> {
public:
explicit SlicedPrefetchOptionsMatcher(
SlicedPrefetchOptions expected_options)
: expected_options_(std::move(expected_options)) {}
bool MatchAndExplain(
const SlicedPrefetchOptions& options,
::testing::MatchResultListener* listener) const override {
if (options.max_slices() != expected_options_.max_slices()) {
*listener << " has " << options.max_slices() << " max slices, expected "
<< expected_options_.max_slices();
return false;
}
if (options.min_bytes() != expected_options_.min_bytes()) {
*listener << " has " << options.min_bytes() << " min bytes, expected "
<< expected_options_.min_bytes();
return false;
}
if (options.fail_on_non_alignment_boundary_slice_proposal() !=
expected_options_.fail_on_non_alignment_boundary_slice_proposal()) {
*listener
<< " has fail_on_non_alignment_boundary_slice_proposal set to "
<< options.fail_on_non_alignment_boundary_slice_proposal()
<< ", expected "
<< expected_options_
.fail_on_non_alignment_boundary_slice_proposal();
return false;
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << " has the following options: max_slices("
<< expected_options_.max_slices() << "), min_bytes("
<< expected_options_.min_bytes()
<< ") fail_on_non_alignment_boundary_slice_proposal("
<< expected_options_.fail_on_non_alignment_boundary_slice_proposal()
<< ")";
}
private:
SlicedPrefetchOptions expected_options_;
};
static inline ::testing::Matcher<const SlicedPrefetchOptions&>
EqualsSlicedPrefetchOptions(SlicedPrefetchOptions expected_options) {
return ::testing::MakeMatcher(
new SlicedPrefetchOptionsMatcher(std::move(expected_options)));
}
static std::vector<const HloInstruction*> SortSlicesInExpectedSpatialOrder(
const HloInstruction* concat_bitcast) {
std::vector<const HloInstruction*> sorted_slices(
concat_bitcast->operands().begin(), concat_bitcast->operands().end());
absl::c_sort(sorted_slices, [](const HloInstruction* lhs,
const HloInstruction* rhs) {
CHECK(IsAsyncSliceDone(lhs));
CHECK(IsAsyncSliceDone(rhs));
CHECK(!lhs->async_wrapped_instruction()->slice_starts().empty());
CHECK(!rhs->async_wrapped_instruction()->slice_starts().empty());
return lhs->async_wrapped_instruction()->slice_starts().front() <
rhs->async_wrapped_instruction()->slice_starts().front();
});
return sorted_slices;
}
static bool IsAsyncCopyStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyStart;
}
static bool IsAsyncCopyDone(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyDone;
}
static bool IsAsyncSliceStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice;
}
static bool IsAsyncSliceDone(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice;
}
static bool IsConcatBitcast(const HloInstruction* instruction) {
return instruction->IsCustomCall(kConcatBitcastCustomCall);
}
static absl::StatusOr<int> FindScheduleIndexOfInstruction(
const std::vector<HloInstruction*>& schedule, std::string_view name,
InstructionClass c) {
for (int i = 0; i < schedule.size(); ++i) {
if (schedule[i]->name() == name) {
return i;
}
}
return NotFound(
"%s",
absl::StrCat("Could not find ", InstructionClassToString(c),
" instruction ", name, " in the instruction schedule."));
}
static const HloInstruction* FindNamedScheduledInstruction(
const HloModule& module, std::string_view name) {
for (const HloInstruction* i : module.entry_computation()->instructions()) {
if (i->name() == name) {
return i;
}
}
return nullptr;
}
static absl::StatusOr<std::vector<int>> GetSliceStartIndicies(
const std::vector<HloInstruction*>& schedule,
const HloInstruction* concat_bitcast) {
    std::vector<int> indices;
if (!IsConcatBitcast(concat_bitcast)) {
return InvalidArgumentStrCat(concat_bitcast->name(),
" is not a concat-bitcast.");
}
for (int i = 0; i < concat_bitcast->operand_count(); ++i) {
const HloInstruction* async_slice_done = concat_bitcast->operand(i);
if (!IsAsyncSliceDone(async_slice_done)) {
return InvalidArgumentStrCat("Operand ", i, " of ",
concat_bitcast->name(),
" is not an async-slice-done.");
}
const HloInstruction* async_slice_start = async_slice_done->operand(0);
if (!IsAsyncSliceStart(async_slice_start)) {
return InvalidArgumentStrCat("Operand 0, of operand ", i, " of ",
concat_bitcast->name(),
" is not an async-slice-start.");
}
TF_ASSIGN_OR_RETURN(
int schedule_index,
FindScheduleIndexOfInstruction(schedule, async_slice_start->name(),
InstructionClass::kRelatedSliceStart));
      indices.push_back(schedule_index);
}
    return indices;
}
static absl::Status ConcatBitcastAndSlicesAfterInstruction(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_start_after_index) {
for (int i = 0; i < slices_start_after_index; ++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kRelatedSliceStart ||
c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast) {
return FailedPrecondition(
"%s", absl::StrCat(InstructionClassToString(c), " ",
instruction->name(), " is scheduled at ", i,
", but is expected to be after ",
schedule[slices_start_after_index]->name(),
" at ", slices_start_after_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status AtLeastOneNonCopyLikeInstructionBetweenSliceStarts(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class) {
bool found_non_copy_since_last_slice_start = true;
for (int i = 0; i < schedule_to_class.size(); ++i) {
InstructionClass c = schedule_to_class[i];
if (c == InstructionClass::kRelatedSliceStart &&
!found_non_copy_since_last_slice_start) {
return FailedPrecondition(
"%s",
absl::StrCat(
"Did not find a non-copy-like instruction between slice start ",
schedule[i]->name(), " at ", i,
" and the previous slice start."));
}
if (c == InstructionClass::kRelatedSliceStart) {
found_non_copy_since_last_slice_start = false;
} else if (c == InstructionClass::kUnrelatedNonCopy) {
found_non_copy_since_last_slice_start = true;
}
}
return absl::OkStatus();
}
static absl::Status OneSliceStartAfterInstructionWithNoCopyLikeBetween(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_start_after_index) {
int first_slice_start_after_schedule_after = -1;
int first_non_copy_after_schedule_after = -1;
for (int i = slices_start_after_index + 1;
i < schedule_to_class.size() &&
(first_slice_start_after_schedule_after == -1 ||
first_non_copy_after_schedule_after == -1);
++i) {
if (first_slice_start_after_schedule_after == -1 &&
schedule_to_class[i] == InstructionClass::kRelatedSliceStart) {
first_slice_start_after_schedule_after = i;
continue;
}
if (first_non_copy_after_schedule_after == -1 &&
schedule_to_class[i] == InstructionClass::kUnrelatedNonCopy) {
first_non_copy_after_schedule_after = i;
continue;
}
}
if (first_slice_start_after_schedule_after == -1) {
return NotFound(
"%s", absl::StrCat("Could not find a slice start instruction "
"after start after instruction ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, "."));
}
if (first_non_copy_after_schedule_after == -1) {
return NotFound(
"%s", absl::StrCat("Could not a find non-copy-like instruction "
"after start after instruction ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, "."));
}
if (first_slice_start_after_schedule_after >
first_non_copy_after_schedule_after) {
return FailedPrecondition(
"%s", absl::StrCat(
"Unexpectedly found a non-copy-like instruction at ",
first_non_copy_after_schedule_after, ", between ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, ", and the first slice start at ",
first_slice_start_after_schedule_after, "."));
}
return absl::OkStatus();
}
static absl::Status ConcatBitcastAndSlicesBeforeInstruction(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_done_before_index) {
for (int i = slices_done_before_index + 1; i < schedule_to_class.size();
++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kRelatedSliceStart ||
c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast) {
return FailedPrecondition(
"%s", absl::StrCat(InstructionClassToString(c), " ",
instruction->name(), " is scheduled at ", i,
", but is expected to be before ",
schedule[slices_done_before_index]->name(),
" at ", slices_done_before_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status
ConcatBitcastAndSliceDonesBeforeInstructionWithNoCopyLikeBetween(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_done_before_index) {
bool found_non_copy = false;
for (int i = slices_done_before_index - 1; i >= 0; --i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kUnrelatedNonCopy) {
found_non_copy = true;
continue;
}
if (found_non_copy && (c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast)) {
return FailedPrecondition(
"%s",
absl::StrCat("Found non-copy instruction between ",
InstructionClassToString(c), " ", instruction->name(),
" at ", i, ", and slice done before instruction ",
schedule[slices_done_before_index]->name(), " at ",
slices_done_before_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status ConcatBitcastAfterSliceDones(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class) {
int concat_bitcast_index = -1;
for (int i = 0; i < schedule_to_class.size(); ++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (concat_bitcast_index == -1 &&
c == InstructionClass::kRelatedConcatBitcast) {
concat_bitcast_index = i;
continue;
}
if (concat_bitcast_index != -1 &&
c == InstructionClass::kRelatedSliceDone) {
return FailedPrecondition(
"%s", absl::StrCat("Unexpectedly, found concat-bitcast ",
schedule[concat_bitcast_index]->name(), " at ",
concat_bitcast_index,
", which is before the slice done ",
instruction->name(), " at ", i, "."));
}
}
return absl::OkStatus();
}
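  // Checks the schedule around a sliced prefetch: the slice starts must come
  // after `slices_start_after_instruction_name`, the slice dones and the
  // concat-bitcast must come before `slices_done_before_instruction_name`,
  // and (optionally) the slice starts must be staggered across compute.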
static absl::Status CheckSchedule(
const HloModule& module, const HloInstruction* concat_bitcast,
std::string_view slices_start_after_instruction_name,
std::string_view slices_done_before_instruction_name,
bool expect_slices_started_at_different_times) {
CHECK(concat_bitcast->IsCustomCall(kConcatBitcastCustomCall));
auto entry_schedule =
module.schedule().sequence(module.entry_computation()).instructions();
std::vector<InstructionClass> schedule_to_class(
entry_schedule.size(), InstructionClass::kUnrelatedNonCopy);
for (int i = 0; i < entry_schedule.size(); ++i) {
const HloInstruction* instruction = entry_schedule[i];
if (IsAsyncCopyStart(instruction) || IsAsyncCopyDone(instruction) ||
IsAsyncSliceStart(instruction) || IsAsyncSliceDone(instruction) ||
IsConcatBitcast(instruction)) {
schedule_to_class[i] = InstructionClass::kUnrelatedCopyLike;
}
}
int slices_start_after_index;
TF_ASSIGN_OR_RETURN(slices_start_after_index,
FindScheduleIndexOfInstruction(
entry_schedule, slices_start_after_instruction_name,
InstructionClass::kStartAfterNonCopy));
schedule_to_class[slices_start_after_index] =
InstructionClass::kStartAfterNonCopy;
int slices_done_before_index;
TF_ASSIGN_OR_RETURN(slices_done_before_index,
FindScheduleIndexOfInstruction(
entry_schedule, slices_done_before_instruction_name,
InstructionClass::kDoneBeforeNonCopy));
schedule_to_class[slices_done_before_index] =
InstructionClass::kDoneBeforeNonCopy;
int concat_bitcast_index;
TF_ASSIGN_OR_RETURN(concat_bitcast_index,
FindScheduleIndexOfInstruction(
entry_schedule, concat_bitcast->name(),
InstructionClass::kRelatedConcatBitcast));
schedule_to_class[concat_bitcast_index] =
InstructionClass::kRelatedConcatBitcast;
for (const HloInstruction* slice : concat_bitcast->operands()) {
int done_index;
TF_ASSIGN_OR_RETURN(done_index, FindScheduleIndexOfInstruction(
entry_schedule, slice->name(),
InstructionClass::kRelatedSliceDone));
schedule_to_class[done_index] = InstructionClass::kRelatedSliceDone;
int start_index;
TF_ASSIGN_OR_RETURN(start_index,
FindScheduleIndexOfInstruction(
entry_schedule, slice->operand(0)->name(),
InstructionClass::kRelatedSliceStart));
schedule_to_class[start_index] = InstructionClass::kRelatedSliceStart;
}
TF_RETURN_IF_ERROR(ConcatBitcastAndSlicesAfterInstruction(
entry_schedule, schedule_to_class, slices_start_after_index));
TF_RETURN_IF_ERROR(OneSliceStartAfterInstructionWithNoCopyLikeBetween(
entry_schedule, schedule_to_class, slices_start_after_index));
if (expect_slices_started_at_different_times) {
TF_RETURN_IF_ERROR(AtLeastOneNonCopyLikeInstructionBetweenSliceStarts(
entry_schedule, schedule_to_class));
}
TF_RETURN_IF_ERROR(ConcatBitcastAndSlicesBeforeInstruction(
entry_schedule, schedule_to_class, slices_done_before_index));
TF_RETURN_IF_ERROR(
ConcatBitcastAndSliceDonesBeforeInstructionWithNoCopyLikeBetween(
entry_schedule, schedule_to_class, slices_done_before_index));
TF_RETURN_IF_ERROR(
ConcatBitcastAfterSliceDones(entry_schedule, schedule_to_class));
return absl::OkStatus();
}
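  // Checks the preset assignments for a sliced prefetch: every slice must get
  // a chunk matching its shape, consecutive slices must be laid out
  // contiguously, and the sliced copy result must get a single chunk covering
  // all of them.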
static absl::Status CheckSliceChunks(const PresetAssignments& assignments,
const HloInstruction* sliced_copy_result,
bool expect_bitcasted_io = false) {
const HloInstruction* concat_bitcast =
(expect_bitcasted_io ? sliced_copy_result->operand(0)
: sliced_copy_result);
CHECK(concat_bitcast->IsCustomCall(kConcatBitcastCustomCall));
absl::flat_hash_map<const HloInstruction*, Chunk> slices_to_chunks;
std::optional<Chunk> result_chunk = std::nullopt;
for (const std::pair<HloPosition, Chunk>& position_chunk_pair :
assignments.chunks()) {
if (position_chunk_pair.first.instruction == sliced_copy_result) {
if (result_chunk.has_value()) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy ", sliced_copy_result->name(),
" is assigned more than one chunk: ",
result_chunk->ToString(), " and ",
position_chunk_pair.second.ToString()));
}
result_chunk = position_chunk_pair.second;
}
for (const HloInstruction* slice : concat_bitcast->operands()) {
if (position_chunk_pair.first.instruction == slice) {
auto it = slices_to_chunks.find(slice);
if (it != slices_to_chunks.end()) {
return FailedPrecondition(
"%s", absl::StrCat("Slice ", slice->name(),
" is assigned more than one chunk: ",
it->second.ToString(), " and ",
position_chunk_pair.second.ToString()));
}
slices_to_chunks[slice] = position_chunk_pair.second;
}
}
}
std::vector<const HloInstruction*> sorted_slices =
SortSlicesInExpectedSpatialOrder(concat_bitcast);
VLOG(1) << "Chunk assignments for " << sliced_copy_result->name() << ":\n"
<< absl::StrJoin(
sorted_slices, "\n",
[&](std::string* out, const HloInstruction* slice) {
auto it = slices_to_chunks.find(slice);
std::string chunk = "no chunk assigned";
if (it != slices_to_chunks.end()) {
chunk = it->second.ToString();
}
absl::StrAppend(out, " slice ", slice->name(), ": ",
chunk);
})
<< "\n sliced copy result " << sliced_copy_result->name() << ": "
<< (result_chunk.has_value() ? result_chunk->ToString()
: "no chunk assigned");
if (sorted_slices.empty()) {
return absl::OkStatus();
}
int64_t previous_end = -1;
int64_t min_offset = std::numeric_limits<int64_t>::max();
int64_t max_limit = std::numeric_limits<int64_t>::min();
for (const HloInstruction* slice : sorted_slices) {
auto it = slices_to_chunks.find(slice);
if (it == slices_to_chunks.end()) {
return FailedPrecondition(
"%s",
absl::StrCat("Slice ", slice->name(), " is not assigned a chunk"));
}
const Chunk& chunk = it->second;
if (chunk.size != ShapeSize(slice->shape())) {
return FailedPrecondition(
"%s",
absl::StrCat("Slice ", slice->name(), " is assigned chunk ",
chunk.ToString(), " with size ", chunk.size,
". Expected a size of ", ShapeSize(slice->shape()),
", to match its shape."));
}
if (previous_end != -1 && chunk.offset != previous_end) {
return FailedPrecondition(
"%s", absl::StrCat(
"Slice ", slice->name(), " starts at offset ",
chunk.offset, ". Expected it to start at ", previous_end,
" because that's where the previous slice ended."));
}
previous_end = chunk.chunk_end();
min_offset = std::min(min_offset, chunk.offset);
max_limit = std::max(max_limit, chunk.chunk_end());
}
if (!result_chunk.has_value()) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is not assigned a chunk."));
}
Chunk expected_result_chunk = Chunk::FromOffsetEnd(min_offset, max_limit);
if (!(*result_chunk == expected_result_chunk)) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is assigned chunk ", result_chunk->ToString(),
", but it's expected to be assigned chunk ",
expected_result_chunk.ToString()));
}
if (result_chunk->size != ShapeSize(sliced_copy_result->shape())) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is assigned chunk ", result_chunk->ToString(),
" with size ", result_chunk->size,
". Expected a size of ",
ShapeSize(sliced_copy_result->shape()),
", to match its shape."));
}
return absl::OkStatus();
}
SlicedPrefetchTest() {
EXPECT_CALL(slice_proposer_, ProposeSlices(_, _)).Times(0);
options_.max_size_in_bytes = 1024;
options_.sliced_prefetch_options.set_max_slices(2);
options_.sliced_prefetch_options.set_min_bytes(8);
options_.propose_slice_fn = [&](const Shape& shape,
const SlicedPrefetchOptions& options) {
return slice_proposer_.ProposeSlices(shape, options);
};
options_.get_equivalent_s8_shape_fn = [](const Shape& original_shape) {
return ShapeUtil::MakeShape(S8, {ShapeSize(original_shape)});
};
}
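  // Configures the mock proposer to split an f32[8,8] buffer into two f32[4,8] slices (rows [0,4) and [4,8)).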
void SetupProposeSlicesToExpect2SlicesOfF32x8x8() {
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_4_8_, std::vector<SliceParam>({{0, 4}, {0, 8}}),
ShapeSize(f32_4_8_)}),
SliceProposal({f32_4_8_, std::vector<SliceParam>({{4, 8}, {0, 8}}),
ShapeSize(f32_4_8_)}),
})));
}
const Shape f32_8_8_ = ShapeUtil::MakeShape(F32, {8, 8});
const Shape f32_4_8_ = ShapeUtil::MakeShape(F32, {4, 8});
MockSliceProposer slice_proposer_;
Options options_ = DefaultMemorySpaceOptions();
};
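// Expects p1 to be prefetched to alternate memory as an async sliced copy with two slices, scheduled between p1 and the root, with contiguous chunk assignments.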
TEST_F(SlicedPrefetchTest, TwoSlices) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(
CheckSchedule(*module, root->operand(1),
"p1",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
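// Same as TwoSlices, but the proposer splits the f32[8,8] buffer into three slices (3x8, 3x8, and 2x8).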
TEST_F(SlicedPrefetchTest, ThreeSlices) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
const Shape f32_3_8 = ShapeUtil::MakeShape(F32, {3, 8});
const Shape f32_2_8 = ShapeUtil::MakeShape(F32, {2, 8});
options_.sliced_prefetch_options.set_max_slices(3);
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_3_8, std::vector<SliceParam>({{0, 3}, {0, 8}}),
ShapeSize(f32_3_8)}),
SliceProposal({f32_3_8, std::vector<SliceParam>({{3, 6}, {0, 8}}),
ShapeSize(f32_3_8)}),
SliceProposal({f32_2_8, std::vector<SliceParam>({{6, 8}, {0, 8}}),
ShapeSize(f32_2_8)}),
})));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 3}, {0, 8}}, {{3, 6}, {0, 8}}, {{6, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(
CheckSchedule(*module, root->operand(1),
"p1",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
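// With max_slices set to 0, no sliced-prefetch instructions should appear in the schedule.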
TEST_F(SlicedPrefetchTest, SlicingDisabled) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
options_.sliced_prefetch_options.set_max_slices(0);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
TEST_F(SlicedPrefetchTest, TooSmallToSlice) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
options_.sliced_prefetch_options.set_min_bytes(1000000000);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
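// If the slice proposer returns an error, MSA should fall back and emit no sliced-prefetch instructions.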
TEST_F(SlicedPrefetchTest, FallbackToUnsliced) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(absl::StatusOr<SliceProposalCollection>(
FailedPrecondition("%s", "Cannot slice."))));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
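// Uses the cost-analysis-based prefetch interval picker; p1 should still be prefetched in two slices, with the copy scheduled between 'a' and the root.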
TEST_F(SlicedPrefetchTest, UsingCostAnalysisIntervalPicker) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpaceUsingCostAnalysis(
module.get(), options_);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(CheckSchedule(
*module, root->operand(1),
"a",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
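// A sliced prefetch feeding a while loop: the concat-bitcast should alias the loop's tuple element at index 1 through the body and condition parameters, and the whole aliased set should share a single chunk.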
TEST_F(SlicedPrefetchTest, LoopAliasing) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
WhileBody {
body_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
v0 = f32[8,8] get-tuple-element(body_param), index=0
v1 = f32[8,8] get-tuple-element(body_param), index=1
i = f32[] get-tuple-element(body_param), index=2
limit = f32[] get-tuple-element(body_param), index=3
one = f32[] constant(1)
new_i = f32[] add(i, one)
new_v1 = f32[8,8] add(v0, v1)
ROOT while_result = (f32[8,8], f32[8,8], f32[], f32[]) tuple(v0, new_v1, new_i, limit)
}
WhileCond {
cond_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
i = f32[] get-tuple-element(cond_param), index=2
limit = f32[] get-tuple-element(cond_param), index=3
ROOT cond_result = pred[] compare(i, limit), direction=LT
}
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
iterations = f32[] parameter(2)
initial = f32[] constant(0)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
t = (f32[8,8], f32[8,8], f32[], f32[]) tuple(p0, p1, initial, iterations)
w = (f32[8,8], f32[8,8], f32[], f32[]) while(t), condition=WhileCond, body=WhileBody
d = f32[8,8] get-tuple-element(w), index=1
ROOT r = f32[8,8] add(c, d)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpaceUsingCostAnalysis(
module.get(), options_);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
ASSERT_THAT(
root,
op::Add(_,
op::GetTupleElement(
op::While(
op::Tuple(_,
IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1)),
_, _)),
1)));
HloInstruction* w = root->mutable_operand(1)->mutable_operand(0);
HloInstruction* t = w->mutable_operand(0);
HloInstruction* concat_bitcast = t->mutable_operand(1);
HloComputation* while_body = w->while_body();
HloInstruction* body_param = while_body->parameter_instruction(0);
HloComputation* while_cond = w->while_condition();
HloInstruction* cond_param = while_cond->parameter_instruction(0);
absl::flat_hash_set<HloPosition> expected_aliases({
HloPosition{concat_bitcast, {}},
HloPosition{w, {1}},
HloPosition{t, {1}},
HloPosition{body_param, {1}},
HloPosition{cond_param, {1}},
});
auto alias_analysis = HloAliasAnalysis::Run(module.get()).value();
VLOG(2) << alias_analysis->ToString();
const HloBuffer& concat_bitcast_buffer =
alias_analysis->GetUniqueBufferAt(concat_bitcast);
EXPECT_THAT(concat_bitcast_buffer.ComputePositions(),
::testing::IsSupersetOf(expected_aliases));
int num_chunks_for_expected_aliases = 0;
for (const auto& position_chunk_pair : assignments->chunks()) {
if (expected_aliases.contains(position_chunk_pair.first)) {
num_chunks_for_expected_aliases++;
}
}
EXPECT_EQ(num_chunks_for_expected_aliases, 1);
}
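// Repacker whose Repack() behavior is scripted per test via gMock.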
class MockRepacker : public MemorySpaceAssignmentRepacker {
public:
MockRepacker()
: MemorySpaceAssignmentRepacker(std::numeric_limits<int64_t>::max(), 1) {}
MOCK_METHOD(absl::StatusOr<bool>, Repack, (absl::Span<AllocationBlock*>),
(override));
};
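// Without repacking there is not enough room to slice-prefetch p2; a scripted repacker then moves p2's slices (and p3) to new offsets, after which p2 is prefetched in two slices and the repacked offsets show up in the assignments.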
TEST_F(SlicedPrefetchTest, Repack) {
absl::string_view hlo_string = R"(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[16,16] parameter(1)
p2 = f32[32,16] parameter(2)
p3 = f32[16,16] parameter(3)
p4 = f32[32,16] parameter(4)
x1 = f32[] add(p0,p0)
x2 = f32[] add(x1, x1)
a = f32[16,16] sine(p1)
c = f32[16,16] sine(p3)
x3 = f32[] add(x2, x2)
x4 = f32[] add(x3, x3)
b = f32[32,16] sine(p2)
d = f32[32,16] sine(p4)
z1 = f32[16,16] broadcast(x4), dimensions={}
z2 = f32[16,16] add(z1, a)
z3 = f32[32,16] concatenate(z2, c), dimensions={0}
z4 = f32[32,16] add(z3, b)
ROOT z5 = f32[32,16] add(z4, d)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module_no_repacking,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto module_with_repacking,
ParseAndReturnVerifiedModule(hlo_string));
VLOG(1) << "Original module:\n"
<< module_no_repacking->ToString(HloPrintOptions::ShortParsable());
Shape f32_16_16 = ShapeUtil::MakeShape(F32, {16, 16});
Shape f32_32_16 = ShapeUtil::MakeShape(F32, {32, 16});
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_16_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({})));
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_32_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_16_16, std::vector<SliceParam>({{0, 16}, {0, 16}}),
ShapeSize(f32_16_16)}),
SliceProposal({f32_16_16,
std::vector<SliceParam>({{16, 32}, {0, 16}}),
ShapeSize(f32_16_16)}),
})));
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& lhs, const MsaBufferInterval& rhs) {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "p1") {
priority = 1;
} else if (x.buffer->instruction()->name() == "p2") {
priority = 2;
} else if (x.buffer->instruction()->name() == "p3") {
priority = 3;
} else if (x.buffer->instruction()->name() == "p4") {
priority = 4;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 50);
options_.max_size_in_bytes = 4 * 1024;
options_.max_repacks = 0;
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpace(module_no_repacking.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module (no repacking):\n"
<< module_no_repacking->ToString(HloPrintOptions::ShortParsable());
const HloInstruction* d =
FindNamedScheduledInstruction(*module_no_repacking, "d");
ASSERT_NE(d, nullptr);
EXPECT_FALSE(IsConcatBitcast(d->operand(0)));
MockRepacker repacker;
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
EXPECT_CALL(repacker, Repack(_))
.WillRepeatedly([](absl::Span<AllocationBlock*> allocations)
-> absl::StatusOr<bool> {
bool found_p2 = false;
bool found_p3 = false;
for (AllocationBlock* block : allocations) {
VLOG(1) << "Allocation block: " << block->ToString();
if (block->inclusive_start_time == 3 &&
block->initial_offset == 1024 && block->size == 2048) {
found_p2 = true;
block->offset = 2048;
EXPECT_TRUE(block->original_slice_data.has_value());
if (block->original_slice_data.has_value()) {
SlicedAllocationData expected(
{{AllocatedSlice{1024, 1024, 3},
AllocatedSlice{1024, 2048, 7}}});
EXPECT_EQ(*block->original_slice_data, expected)
<< "\nExpected: " << expected.ToString()
<< "\nGot: " << block->original_slice_data->ToString();
block->repacked_slice_data = SlicedAllocationData(
{{AllocatedSlice{1024, 2048, 7},
AllocatedSlice{1024, 3072, 3}}});
}
} else if (block->inclusive_start_time == 4 &&
block->initial_offset == 3072 && block->size == 1024) {
found_p3 = true;
block->offset = 1024;
EXPECT_FALSE(block->original_slice_data.has_value());
} else {
block->offset = block->initial_offset;
}
}
EXPECT_TRUE(found_p2);
EXPECT_TRUE(found_p3);
return true;
});
options_.max_repacks = 1;
options_.repacker = &repacker;
assignments =
AssignMemorySpace(module_with_repacking.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module (with repacking):\n"
<< module_with_repacking->ToString(HloPrintOptions::ShortParsable());
d = FindNamedScheduledInstruction(*module_with_repacking, "d");
ASSERT_NE(d, nullptr);
EXPECT_TRUE(IsConcatBitcast(d->operand(0)));
TF_EXPECT_OK(CheckSliceChunks(*assignments, d->operand(0)));
std::vector<const HloInstruction*> p2_slice_dones;
for (const HloInstruction* i :
module_with_repacking->entry_computation()->instructions()) {
if (IsAsyncSliceStart(i) && i->operand_count() == 1 &&
i->operand(0)->name() == "p2") {
ASSERT_EQ(i->user_count(), 1);
p2_slice_dones.push_back(i->users()[0]);
}
}
ASSERT_EQ(p2_slice_dones.size(), 2);
std::vector<int64_t> p2_slice_offsets;
for (const HloInstruction* i : p2_slice_dones) {
for (const std::pair<HloPosition, Chunk>& position_chunk_pair :
assignments->chunks()) {
if (position_chunk_pair.first.instruction == i) {
p2_slice_offsets.push_back(position_chunk_pair.second.offset);
}
}
}
ASSERT_EQ(p2_slice_offsets.size(), 2);
EXPECT_THAT(p2_slice_dones[0]->async_wrapped_instruction()->slice_starts(),
::testing::ElementsAreArray({16, 0}));
EXPECT_THAT(p2_slice_dones[0]->async_wrapped_instruction()->slice_limits(),
::testing::ElementsAreArray({32, 16}));
EXPECT_EQ(p2_slice_offsets[0], 3072);
EXPECT_THAT(p2_slice_dones[1]->async_wrapped_instruction()->slice_starts(),
::testing::ElementsAreArray({0, 0}));
EXPECT_THAT(p2_slice_dones[1]->async_wrapped_instruction()->slice_limits(),
::testing::ElementsAreArray({16, 16}));
EXPECT_EQ(p2_slice_offsets[1], 2048);
}
struct ModuleAndAssignments {
std::unique_ptr<VerifiedHloModule> module;
std::unique_ptr<PresetAssignments> assignments;
};
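// Prefetches a buffer across two back-to-back while loops. When the first loop is cheap, the two slice starts bracket the first loop; when the first loop is expensive, the prefetch falls back to an unsliced copy that starts before the first loop and finishes after the second.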
TEST_F(SlicedPrefetchTest, BackToBackWhileLoops) {
const std::string while_cond = R"zz(
WhileCond$ID {
cond_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
i = f32[] get-tuple-element(cond_param), index=2
limit = f32[] get-tuple-element(cond_param), index=3
ROOT cond_result = pred[] compare(i, limit), direction=LT
})zz";
const std::string while_body = R"zz(
WhileBody$ID {
body_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
v0 = f32[8,8] get-tuple-element(body_param), index=0
v1 = f32[8,8] get-tuple-element(body_param), index=1
i = f32[] get-tuple-element(body_param), index=2
limit = f32[] get-tuple-element(body_param), index=3
one = f32[] constant(1)
new_i = f32[] add(i, one)
$COMPUTATION
ROOT while_result = (f32[8,8], f32[8,8], f32[], f32[]) tuple(v0, new_v1, new_i, limit)
})zz";
const std::string while_computation_cheap = R"zz(
new_v1 = f32[8,8] add(v0, v1))zz";
std::string while_computation_expensive = R"zz(
new_v1_0 = f32[8,8] add(v0, v1)
new_v1_1 = f32[8,8] tanh(new_v1_0)
new_v1_2 = f32[8,8] tanh(new_v1_1)
new_v1_3 = f32[8,8] tanh(new_v1_2)
new_v1 = f32[8,8] tanh(new_v1_3))zz";
std::string module_text = R"zz(
HloModule Slice, is_scheduled=true
$WHILEBODY1
$WHILECOND1
$WHILEBODY2
$WHILECOND2
ENTRY main {
loop1_input1 = f32[8,8] parameter(0)
loop1_input2 = f32[8,8] parameter(1)
loop1_iterations = f32[] parameter(2)
loop1_begin = f32[] constant(0)
loop1_tuple = (f32[8,8], f32[8,8], f32[], f32[]) tuple(loop1_input1, loop1_input2, loop1_iterations, loop1_begin)
loop2_input1 = f32[8,8] parameter(3)
loop2_input2 = f32[8,8] parameter(4)
loop2_iterations = f32[] parameter(5)
loop2_begin = f32[] constant(0)
loop2_tuple = (f32[8,8], f32[8,8], f32[], f32[]) tuple(loop2_input1, loop2_input2, loop2_iterations, loop2_begin)
prefetch = f32[8,8] parameter(6)
loop1_output = (f32[8,8], f32[8,8], f32[], f32[]) while(loop1_tuple), condition=WhileCond1, body=WhileBody1
loop2_output = (f32[8,8], f32[8,8], f32[], f32[]) while(loop2_tuple), condition=WhileCond2, body=WhileBody2
prefetch_use = f32[8,8] tanh(prefetch)
loop1_result = f32[8,8] get-tuple-element(loop1_output), index=1
loop2_result = f32[8,8] get-tuple-element(loop2_output), index=1
tmp1 = f32[8,8] add(loop1_result, loop2_result)
ROOT r = f32[8,8] add(tmp1, prefetch_use)
})zz";
auto gen_hlo = [&](std::string_view while_computation1,
std::string_view while_computation2) {
return absl::StrReplaceAll(
module_text,
{
{"$WHILEBODY1",
absl::StrReplaceAll(
while_body,
{{"$ID", "1"}, {"$COMPUTATION", while_computation1}})},
{"$WHILECOND1", absl::StrReplaceAll(while_cond, {{"$ID", "1"}})},
{"$WHILEBODY2",
absl::StrReplaceAll(
while_body,
{{"$ID", "2"}, {"$COMPUTATION", while_computation2}})},
{"$WHILECOND2", absl::StrReplaceAll(while_cond, {{"$ID", "2"}})},
});
};
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& lhs, const MsaBufferInterval& rhs) {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "prefetch") {
priority = 1;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(32, 100);
options_.max_size_in_bytes = 4 * 64;
auto run_msa =
[&](std::string_view hlo_text) -> absl::StatusOr<ModuleAndAssignments> {
ModuleAndAssignments module_and_assignments;
TF_ASSIGN_OR_RETURN(module_and_assignments.module,
ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module_and_assignments.module->ToString(
HloPrintOptions::ShortParsable());
module_and_assignments.assignments =
AssignMemorySpace(module_and_assignments.module.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module:\n"
<< module_and_assignments.module->ToString(
HloPrintOptions::ShortParsable());
return module_and_assignments;
};
TF_ASSERT_OK_AND_ASSIGN(
ModuleAndAssignments module_and_assignments1,
run_msa(gen_hlo(while_computation_cheap, while_computation_expensive)));
auto root1 =
module_and_assignments1.module->entry_computation()->root_instruction();
EXPECT_THAT(root1, op::Add(_, op::Tanh(IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(6)))));
TF_EXPECT_OK(CheckSchedule(
*module_and_assignments1.module, root1->operand(1)->operand(0),
"prefetch",
"prefetch_use",
true));
auto entry_schedule1 =
module_and_assignments1.module->schedule()
.sequence(module_and_assignments1.module->entry_computation())
.instructions();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int> start_indicies,
GetSliceStartIndicies(entry_schedule1, root1->operand(1)->operand(0)));
ASSERT_EQ(start_indicies.size(), 2);
TF_ASSERT_OK_AND_ASSIGN(
int first_while,
FindScheduleIndexOfInstruction(
entry_schedule1, "loop1_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
int second_while,
FindScheduleIndexOfInstruction(
entry_schedule1, "loop2_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
EXPECT_TRUE(
absl::c_is_sorted<std::vector<int>>(
{start_indicies[0], first_while, start_indicies[1], second_while}) ||
absl::c_is_sorted<std::vector<int>>(
{start_indicies[1], first_while, start_indicies[0], second_while}));
TF_ASSERT_OK_AND_ASSIGN(
ModuleAndAssignments module_and_assignments2,
run_msa(gen_hlo(while_computation_expensive, while_computation_cheap)));
auto root2 =
module_and_assignments2.module->entry_computation()->root_instruction();
EXPECT_THAT(root2, op::Add(_, op::Tanh(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(6)))));
auto entry_schedule2 =
module_and_assignments2.module->schedule()
.sequence(module_and_assignments2.module->entry_computation())
.instructions();
TF_ASSERT_OK_AND_ASSIGN(
int copy_done,
FindScheduleIndexOfInstruction(
entry_schedule2, root2->operand(1)->operand(0)->name(),
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
int copy_start,
FindScheduleIndexOfInstruction(
entry_schedule2, root2->operand(1)->operand(0)->operand(0)->name(),
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
first_while,
FindScheduleIndexOfInstruction(
entry_schedule2, "loop1_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
second_while,
FindScheduleIndexOfInstruction(
entry_schedule2, "loop2_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
EXPECT_TRUE(absl::c_is_sorted<std::vector<int>>(
{copy_start, first_while, second_while, copy_done}));
}
using RepackingTest = ::testing::Test;
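// Verifies that GetColocations()/GetColocationsCount() follow the circular next_colocated links.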
TEST_F(RepackingTest, Colocations) {
AllocationBlock a{10, 20, 100, 0, 1000, 0};
AllocationBlock b{15, 25, 150, 0, 2000, 1};
AllocationBlock c{18, 22, 50, 0, 500, 2};
AllocationBlock d{5, 9, 20, 0, 3000, 3};
AllocationBlock e{17, 22, 100, 0, 1500, 4};
AllocationBlock f{25, 27, 150, 0, 2500, 5};
a.next_colocated = &a;
b.next_colocated = &c;
c.next_colocated = &b;
d.next_colocated = &f;
e.next_colocated = &d;
f.next_colocated = &e;
EXPECT_EQ(a.GetColocationsCount(), 1);
EXPECT_THAT(a.GetColocations(), UnorderedElementsAre(&a));
EXPECT_EQ(b.GetColocationsCount(), 2);
EXPECT_THAT(b.GetColocations(), UnorderedElementsAre(&b, &c));
EXPECT_EQ(c.GetColocationsCount(), 2);
EXPECT_THAT(c.GetColocations(), UnorderedElementsAre(&b, &c));
EXPECT_EQ(d.GetColocationsCount(), 3);
EXPECT_THAT(d.GetColocations(), UnorderedElementsAre(&d, &e, &f));
EXPECT_EQ(e.GetColocationsCount(), 3);
EXPECT_THAT(e.GetColocations(), UnorderedElementsAre(&d, &e, &f));
EXPECT_EQ(f.GetColocationsCount(), 3);
EXPECT_THAT(f.GetColocations(), UnorderedElementsAre(&d, &e, &f));
}
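// With a preferred slice size set, the proposer returns uniform s8[128] slices for both f32[8,8] and f32[8,16], and both prefetches are expected to be sliced accordingly.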
TEST_F(SlicedPrefetchTest, UniformSizedSlicing) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
p2 = f32[8,16] parameter(2)
constant1 = f32[] constant(1.1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
d = f32[8,8] tanh(c)
e = f32[8,8] tanh(d)
f = f32[8,8] tanh(e)
g = f32[8,8] tanh(f)
h = f32[8,8] tanh(g)
x = f32[8,8] add(p1, h)
padded_x = f32[8,16] pad(x, constant1), padding=0_0x0_8
ROOT r = f32[8,16] add(padded_x, p2)
})zz";
const Shape f32_8_16 = ShapeUtil::MakeShape(F32, {8, 16});
const Shape s8_128 = ShapeUtil::MakeShape(S8, {128});
options_.sliced_prefetch_options.set_max_slices(100000);
options_.sliced_prefetch_options.set_preferred_slice_size(4 * 8 * 4);
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal(
{s8_128, std::vector<SliceParam>({{0, 128}}), ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{128, 256}}),
ShapeSize(s8_128)}),
})));
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal(
{s8_128, std::vector<SliceParam>({{0, 128}}), ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{128, 256}}),
ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{256, 384}}),
ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{384, 512}}),
ShapeSize(s8_128)}),
})));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
100, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Add(op::Pad(op::Add(IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 128}}, {{128, 256}}}, op::Parameter(1),
true),
_),
_),
IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 128}}, {{128, 256}}, {{256, 384}}, {{384, 512}}},
op::Parameter(2), true)));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1),
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments,
root->operand(0)->operand(0)->operand(0),
true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_space_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_space_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90efec1f-b7a5-4704-ad96-5a68bd299536 | cpp | tensorflow/tensorflow | best_fit_repacker | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker.cc | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker_test.cc | #include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
bool IsSliced(const AllocationBlock* block) {
return block->original_slice_data.has_value();
}
template <typename T>
std::vector<const AllocationBlock*> SortAllocationBlocks(const T& container) {
std::vector<const AllocationBlock*> result;
result.insert(result.end(), container.begin(), container.end());
absl::c_sort(
result, [](const AllocationBlock* lhs, const AllocationBlock* rhs) {
return std::make_tuple(lhs->inclusive_start_time, lhs->end_time,
lhs->initial_offset, lhs->size) <
std::make_tuple(rhs->inclusive_start_time, rhs->end_time,
rhs->initial_offset, rhs->size);
});
return result;
}
const SlicedAllocationData* GetSlicedAllocationDataPointer(
const std::optional<SlicedAllocationData>& sliced_allocation_data) {
if (!sliced_allocation_data.has_value()) {
return nullptr;
}
return &(*sliced_allocation_data);
}
class BestFitRepacker
: public GlobalDecreasingSizeBestFitHeap<AllocationBlock> {
public:
BestFitRepacker(
const memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
BestFitRepackOptions& options,
SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type,
int64_t max_size, int64_t alignment)
: GlobalDecreasingSizeBestFitHeap<AllocationBlock>(
alignment, kCustom,
(options.buffer_interval_compare ? options.buffer_interval_compare
: DefaultBufferIntervalCompare()),
slice_time_permutation_iterator_type),
validate_(options.validate),
max_size_(max_size) {}
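  // Imports allocation blocks as (possibly sliced) buffer intervals. Only one block in each colocation set is marked as needing allocation; the rest are recorded as its colocations.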
void ImportAllocationBlocks(absl::Span<AllocationBlock*> allocations) {
allocation_blocks_ = allocations;
for (AllocationBlock* allocation_block : allocation_blocks_) {
bool need_allocation = true;
CHECK_NE(allocation_block->next_colocated, nullptr);
for (AllocationBlock* colocated = allocation_block->next_colocated;
colocated != allocation_block;
colocated = colocated->next_colocated) {
auto aliased_it = full_buffer_interval_map_.find(colocated);
if (aliased_it != full_buffer_interval_map_.end() &&
aliased_it->second.need_allocation) {
aliased_it->second.colocations.push_back(allocation_block);
need_allocation = false;
break;
}
}
full_buffer_interval_map_.insert(
std::make_pair(allocation_block,
BufferInterval{allocation_block,
allocation_block->size,
allocation_block->inclusive_start_time,
allocation_block->end_time,
{},
need_allocation}));
}
for (AllocationBlock* allocation_block : allocation_blocks_) {
BufferInterval& full_buffer_interval =
full_buffer_interval_map_[allocation_block];
SlicedBufferInterval& sliced_buffer_interval =
sliced_buffer_interval_map_
.insert(std::make_pair(
allocation_block, SlicedBufferInterval::CreateMutableInterval(
full_buffer_interval)))
.first->second;
if (IsSliced(allocation_block)) {
const SlicedAllocationData& original_slice_data =
allocation_block->original_slice_data.value();
CHECK(!original_slice_data.slices_sorted_by_offset.empty());
sliced_buffer_interval.Slice(original_slice_data.SizesSortedByOffset());
sliced_buffer_interval.UpdateInclusiveSliceStartTimes(
original_slice_data.SortedInclusiveStartTimes());
}
buffer_intervals_[allocation_block] =
sliced_buffer_interval.IntervalForMakeFreeChunks(
sliced_buffer_interval.num_slices() - 1);
}
CHECK_EQ(allocation_blocks_.size(), buffer_intervals_.size());
CHECK_EQ(allocation_blocks_.size(), full_buffer_interval_map_.size());
CHECK_EQ(allocation_blocks_.size(), sliced_buffer_interval_map_.size());
VLOG(2) << [&]() -> std::string {
int sliced_blocks = 0;
int colocation_sets = 0;
int colocation_sets_with_multiple_sliced_blocks = 0;
absl::flat_hash_set<const AllocationBlock*> seen_blocks;
for (const auto& allocation_and_buffer_interval : buffer_intervals_) {
const AllocationBlock* block = allocation_and_buffer_interval.first;
const BufferInterval& min_buffer_interval =
allocation_and_buffer_interval.second;
if (IsSliced(block)) {
++sliced_blocks;
}
if (seen_blocks.contains(block)) {
continue;
}
seen_blocks.insert(block);
++colocation_sets;
int num_sliced_colocations = (IsSliced(block) ? 1 : 0);
for (const AllocationBlock* colocation :
GetTransitiveColocations(min_buffer_interval)) {
seen_blocks.insert(colocation);
if (IsSliced(colocation)) {
++num_sliced_colocations;
}
}
if (num_sliced_colocations > 1) {
++colocation_sets_with_multiple_sliced_blocks;
}
}
return absl::StrCat(
"Imported repacking stats: num_blocks=", allocation_blocks_.size(),
"; num_sliced_blocks=", sliced_blocks,
"; num_colocation_sets=", colocation_sets,
"; num_colocation_sets_with_multiple_sliced_blocks=",
colocation_sets_with_multiple_sliced_blocks);
}();
}
BufferIntervalCompare DefaultBufferIntervalCompare() const {
return LessThanByKey([this](const BufferInterval& x) {
const BufferInterval& full_buffer_interval =
full_buffer_interval_map_.at(x.buffer);
int64_t full_buffer_interval_end = full_buffer_interval.end;
for (auto colocation : GetTransitiveColocations(x)) {
full_buffer_interval_end =
std::max(full_buffer_interval_end,
full_buffer_interval_map_.at(colocation).end);
}
return std::make_tuple(
full_buffer_interval.start - full_buffer_interval_end,
-full_buffer_interval.size, std::cref(*full_buffer_interval.buffer));
});
}
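  // Commits the chunks chosen for a block: grows the tracked heap size, records occupancy in the interval tree, and stores the block's new offset (plus repacked slice data for sliced blocks).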
void CommitChunks(const AllocationBlock* allocation_block,
const std::vector<Chunk>& chunks) {
VLOG(3) << "Committing repack chunks for " << allocation_block->ToString();
int64_t new_offset = -1;
std::optional<SlicedAllocationData> repacked_slice_data = std::nullopt;
if (IsSliced(allocation_block)) {
const SlicedAllocationData& original_slice_data =
allocation_block->original_slice_data.value();
CHECK_EQ(chunks.size(),
original_slice_data.slices_sorted_by_offset.size());
repacked_slice_data = SlicedAllocationData();
repacked_slice_data->slices_sorted_by_offset.reserve(chunks.size());
std::vector<int64_t> sorted_inclusive_start_times =
original_slice_data.SortedInclusiveStartTimes();
for (int i = 0; i < chunks.size(); ++i) {
const Chunk& chunk = chunks[i];
int64_t start_time = sorted_inclusive_start_times[i];
result_.heap_size = result_.UpdatedHeapSize(chunk);
VLOG(3) << "Adding sliced chunk " << chunk.ToString() << " at ["
<< start_time << ", " << allocation_block->end_time << "]";
interval_tree_.Add(start_time, allocation_block->end_time, chunk);
new_offset = (new_offset == -1 ? chunk.offset
: std::min(new_offset, chunk.offset));
repacked_slice_data->slices_sorted_by_offset.push_back(
AllocatedSlice({chunk.size, chunk.offset, start_time}));
}
absl::c_sort(repacked_slice_data->slices_sorted_by_offset,
[](const AllocatedSlice& lhs, const AllocatedSlice& rhs) {
return lhs.offset < rhs.offset;
});
} else {
CHECK_EQ(chunks.size(), 1);
new_offset = chunks.front().offset;
result_.heap_size = result_.UpdatedHeapSize(chunks.front());
VLOG(3) << "Adding unsliced chunk " << chunks.front().ToString()
<< " at [" << allocation_block->inclusive_start_time << ", "
<< allocation_block->end_time << ")";
interval_tree_.Add(allocation_block->inclusive_start_time,
allocation_block->end_time, chunks.front());
}
CHECK_NE(new_offset, -1);
CHECK(!new_offsets_.contains(allocation_block));
new_offsets_[allocation_block] = new_offset;
if (repacked_slice_data.has_value()) {
CHECK(IsSliced(allocation_block));
CHECK(!new_repacked_slicing_.contains(allocation_block));
new_repacked_slicing_[allocation_block] = *repacked_slice_data;
}
}
struct SlicedColocationData {
SlicedBufferInterval* sliced_buffer_interval;
SlicedAllocationFinder sliced_allocation_finder;
std::vector<Chunk> chunks;
};
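  // Finds and commits chunks for a buffer interval and all of its transitive colocations. Sliced colocations restrict the candidate offsets for the primary buffer via is_offset_allowed.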
void FindAndCommitChunks(BufferInterval* min_buffer_interval) {
const AllocationBlock* allocation_block = min_buffer_interval->buffer;
SlicedBufferInterval& sliced_buffer_interval =
sliced_buffer_interval_map_.at(allocation_block);
int64_t max_colocation_size = GetMaxColocationSize(*min_buffer_interval);
absl::flat_hash_map<const AllocationBlock*, SlicedColocationData>
sliced_buffer_map;
for (auto colocation :
SortAllocationBlocks(GetTransitiveColocations(*min_buffer_interval))) {
if (IsSliced(colocation)) {
SlicedBufferInterval& colocation_sliced_buffer_interval =
sliced_buffer_interval_map_.at(colocation);
SlicedAllocationFinder sliced_colocation_finder =
CreateSlicedAllocationFinder(
colocation_sliced_buffer_interval, max_colocation_size,
-1,
SliceTimePermutationIterator::CreateForRepack(
slice_time_permutation_iterator_type(),
GetSlicedAllocationDataPointer(
colocation->original_slice_data)),
&SlicedAllocationFinder::AllOffsetsAllowed);
sliced_buffer_map.insert(std::make_pair(
colocation,
SlicedColocationData{&colocation_sliced_buffer_interval,
std::move(sliced_colocation_finder),
{}}));
}
}
auto is_offset_allowed = [this, &sliced_buffer_map](int64_t offset) {
for (auto& block_and_colocation_data : sliced_buffer_map) {
SlicedColocationData& sliced_colocation_data =
block_and_colocation_data.second;
auto colocation_chunks =
sliced_colocation_data.sliced_allocation_finder.FindForOffset(
offset);
colocation_chunks = PostProcessFindChunkCandidatesResult(
*sliced_colocation_data.sliced_buffer_interval,
std::move(colocation_chunks));
if (colocation_chunks.empty()) {
return false;
}
sliced_colocation_data.chunks = std::move(colocation_chunks);
}
return true;
};
SlicedAllocationFinder finder = CreateSlicedAllocationFinder(
sliced_buffer_interval, max_colocation_size, -1,
SliceTimePermutationIterator::CreateForRepack(
slice_time_permutation_iterator_type(),
GetSlicedAllocationDataPointer(
allocation_block->original_slice_data)),
is_offset_allowed);
std::vector<Chunk> chunks = PostProcessFindChunkCandidatesResult(
sliced_buffer_interval, finder.Find());
int64_t min_offset =
absl::c_min_element(chunks, [](const Chunk& lhs, const Chunk& rhs) {
return lhs.offset < rhs.offset;
})->offset;
CommitChunks(allocation_block, chunks);
for (auto colocation : GetTransitiveColocations(*min_buffer_interval)) {
if (IsSliced(colocation)) {
CommitChunks(colocation, sliced_buffer_map.at(colocation).chunks);
} else {
const BufferInterval& colocation_full_buffer_interval =
full_buffer_interval_map_[colocation];
CommitChunks(colocation,
{Chunk::FromOffsetSize(
min_offset, colocation_full_buffer_interval.size)});
}
}
}
void AddToChunkMap(const AllocationBlock* buffer, Chunk chunk) override {
LOG(FATAL) << "We should never get here.";
}
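  // Allocates chunks for every buffer interval that needs allocation, in sorted order.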
absl::StatusOr<Result> Finish() override {
std::vector<BufferInterval> sorted_buffer_intervals =
GetSortedBufferIntervals();
for (auto& buffer_interval : sorted_buffer_intervals) {
if (!buffer_interval.need_allocation) {
continue;
}
FindAndCommitChunks(&buffer_interval);
}
Result result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(result_);
return result;
}
struct TimedChunk {
std::string id;
const AllocationBlock* block;
int64_t start_inclusive;
int64_t end_inclusive;
Chunk chunk;
bool Overlaps(const TimedChunk& timed_chunk) {
if (timed_chunk.start_inclusive > end_inclusive ||
timed_chunk.end_inclusive < start_inclusive) {
return false;
}
return chunk.OverlapsWith(timed_chunk.chunk);
}
};
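  // Expands each repacked block (and each of its slices) into timed chunks and fails fatally if any two overlap in both time and space.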
void DebuggingValidate() {
std::vector<TimedChunk> timed_chunks;
for (const AllocationBlock* block : allocation_blocks_) {
if (IsSliced(block)) {
for (int i = 0;
i < block->repacked_slice_data->slices_sorted_by_offset.size();
++i) {
const AllocatedSlice& slice =
block->repacked_slice_data->slices_sorted_by_offset[i];
timed_chunks.push_back(
TimedChunk{absl::StrCat(((int64_t)block), "_slice_", i), block,
slice.inclusive_start_time, block->end_time,
Chunk::FromOffsetSize(slice.offset, slice.size)});
}
} else {
timed_chunks.push_back(
TimedChunk{absl::StrCat(((int64_t)block)), block,
block->inclusive_start_time, block->end_time,
Chunk::FromOffsetSize(block->offset, block->size)});
}
}
bool overlap_found = false;
for (int i = 0; i < timed_chunks.size(); ++i) {
for (int j = i + 1; j < timed_chunks.size(); ++j) {
if (timed_chunks[i].Overlaps(timed_chunks[j])) {
overlap_found = true;
LOG(ERROR) << "Allocation block overlap\n"
<< " " << timed_chunks[i].block->ToString()
<< "\n " << timed_chunks[j].block->ToString();
}
}
}
if (overlap_found) {
LOG(FATAL) << "Allocation overlap found";
}
}
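  // Runs the placement and, if the resulting heap fits within max_size_, writes the new offsets and repacked slice data back into the allocation blocks.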
bool Repack() {
TF_CHECK_OK(Finish().status());
bool success = result_.heap_size <= max_size_;
if (!success) {
VLOG(1) << "Repacking unsuccessful with heap size " << result_.heap_size;
return false;
}
for (AllocationBlock* block : allocation_blocks_) {
CHECK(new_offsets_.contains(block));
block->offset = new_offsets_[block];
if (!IsSliced(block)) {
continue;
}
CHECK(new_repacked_slicing_.contains(block));
block->repacked_slice_data = std::move(new_repacked_slicing_[block]);
}
if (validate_) {
DebuggingValidate();
}
if (VLOG_IS_ON(2)) {
for (AllocationBlock* block : allocation_blocks_) {
VLOG(2) << "AllocationBlock after repacking: " << block->ToString();
}
}
VLOG(1) << "Repacking successful with heap size " << result_.heap_size;
return true;
}
private:
bool validate_ = false;
int64_t max_size_;
absl::Span<AllocationBlock*> allocation_blocks_;
absl::flat_hash_map<const AllocationBlock*, BufferInterval>
full_buffer_interval_map_;
absl::flat_hash_map<const AllocationBlock*, SlicedBufferInterval>
sliced_buffer_interval_map_;
absl::flat_hash_map<const AllocationBlock*, int64_t> new_offsets_;
absl::flat_hash_map<const AllocationBlock*, SlicedAllocationData>
new_repacked_slicing_;
};
}
namespace memory_space_assignment {
absl::StatusOr<bool> MemorySpaceAssignmentBestFitRepacker::Repack(
absl::Span<AllocationBlock*> allocations) {
BestFitRepacker best_fit_repacker = BestFitRepacker(
options_, slice_time_permutation_iterator_type_, max_size_, alignment_);
best_fit_repacker.ImportAllocationBlocks(allocations);
return best_fit_repacker.Repack();
}
}
} | #include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <cstdint>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "tsl/platform/test.h"
namespace xla {
class MemorySpaceAssignmentBestFitRepackerTest : public ::testing::Test {
protected:
MemorySpaceAssignmentBestFitRepackerTest()
: repacker_(100, 1, SliceTimePermutationIterator::Ty::kAll, options_) {}
AllocationBlock* MakeAllocationBlock(int64_t start_time, int64_t end_time,
int64_t size,
int64_t initial_offset = -1) {
allocation_blocks_.push_back(
{start_time, end_time, size, -1, initial_offset,
static_cast<int64_t>(allocation_blocks_.size())});
AllocationBlock* block = &allocation_blocks_.back();
block->next_colocated = block;
return block;
}
std::list<AllocationBlock> allocation_blocks_;
memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
BestFitRepackOptions options_{true,
nullptr};
memory_space_assignment::MemorySpaceAssignmentBestFitRepacker repacker_;
};
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Simple) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 15);
EXPECT_EQ(allocation_blocks[1]->offset, 0);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Colocation) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 2, 10));
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks[0]->next_colocated = allocation_blocks[1];
allocation_blocks[1]->next_colocated = allocation_blocks[0];
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 15);
EXPECT_EQ(allocation_blocks[1]->offset, 15);
EXPECT_EQ(allocation_blocks[2]->offset, 0);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, TooLarge) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(12, 22, 50));
allocation_blocks.push_back(MakeAllocationBlock(10, 18, 20));
EXPECT_FALSE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, -1);
EXPECT_EQ(allocation_blocks[1]->offset, -1);
EXPECT_EQ(allocation_blocks[2]->offset, -1);
EXPECT_EQ(allocation_blocks[3]->offset, -1);
EXPECT_EQ(allocation_blocks[4]->offset, -1);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, ColocationDifferentSizes) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 2, 5));
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks[0]->next_colocated = allocation_blocks[1];
allocation_blocks[1]->next_colocated = allocation_blocks[0];
allocation_blocks.push_back(MakeAllocationBlock(9, 11, 2));
allocation_blocks.push_back(MakeAllocationBlock(1, 2, 2));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_EQ(allocation_blocks[2]->offset, 10);
EXPECT_EQ(allocation_blocks[3]->offset, 5);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, RepackedSlicesFit) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 15, 2));
allocation_blocks.push_back(MakeAllocationBlock(11, 21, 3));
allocation_blocks.push_back(MakeAllocationBlock(16, 25, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 16}, AllocatedSlice{2, -1, 22}}});
allocation_blocks.push_back(MakeAllocationBlock(26, 33, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 26}, AllocatedSlice{2, -1, 30}}});
allocation_blocks.push_back(MakeAllocationBlock(19, 25, 2));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{1, -1, 19}, AllocatedSlice{1, -1, 22}}});
allocation_blocks.push_back(MakeAllocationBlock(26, 29, 2));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 2);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 0);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 0, 16}, AllocatedSlice{2, 2, 22}}})));
EXPECT_EQ(allocation_blocks[3]->offset, 0);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 0, 26}, AllocatedSlice{2, 2, 30}}})));
EXPECT_EQ(allocation_blocks[4]->offset, 4);
ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{1, 4, 22}, AllocatedSlice{1, 5, 19}}})));
EXPECT_EQ(allocation_blocks[5]->offset, 2);
EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SliceTimePermutationsMatchOriginalSizeTimeMapping) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 2));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 5}, AllocatedSlice{1, 4, 11}}});
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 2, 6));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
ASSERT_TRUE(allocation_blocks[1]->repacked_slice_data.has_value());
ASSERT_EQ(
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset.size(),
2);
const AllocatedSlice& slice_with_smaller_offset =
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[0];
const AllocatedSlice& slice_with_larger_offset =
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[1];
ASSERT_GT(slice_with_smaller_offset.size, slice_with_larger_offset.size);
const AllocatedSlice& larger_slice = slice_with_smaller_offset;
const AllocatedSlice& smaller_slice = slice_with_larger_offset;
ASSERT_LT(larger_slice.inclusive_start_time,
smaller_slice.inclusive_start_time);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SliceTimePermutationsMatchOriginalSizeTimeMapping2) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
allocation_blocks.push_back(MakeAllocationBlock(11, 20, 2, 4));
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 1));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{1, 1, 5}, AllocatedSlice{2, 2, 11}}});
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 2);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{1, 2, 5}, AllocatedSlice{2, 3, 11}}})));
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, SlicedColocationsFit) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 12, 2));
allocation_blocks.push_back(MakeAllocationBlock(0, 8, 2));
allocation_blocks.push_back(MakeAllocationBlock(5, 11, 2));
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 15}, AllocatedSlice{3, -1, 18}}});
allocation_blocks.push_back(MakeAllocationBlock(9, 14, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 9}, AllocatedSlice{2, -1, 12}}});
allocation_blocks.back()->next_colocated = allocation_blocks[3];
allocation_blocks[3]->next_colocated = allocation_blocks.back();
allocation_blocks.push_back(MakeAllocationBlock(15, 17, 5));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 2);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 4);
ASSERT_FALSE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[3]->offset, 2);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 15}, AllocatedSlice{3, 4, 18}}})));
EXPECT_EQ(allocation_blocks[4]->offset, 2);
ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 9}, AllocatedSlice{2, 4, 12}}})));
EXPECT_EQ(allocation_blocks[5]->offset, 4);
EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SlicedColocationsPermutationsMatchOriginalSizeTimeMapping) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(1, 5, 2));
allocation_blocks.push_back(MakeAllocationBlock(11, 15, 2));
allocation_blocks.push_back(MakeAllocationBlock(1, 10, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 6}, AllocatedSlice{3, 4, 1}}});
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}});
allocation_blocks.back()->next_colocated = allocation_blocks[2];
allocation_blocks[2]->next_colocated = allocation_blocks.back();
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 2);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
EXPECT_EQ(allocation_blocks[3]->offset, 2);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/best_fit_repacker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/best_fit_repacker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c9ef4d9a-f25e-4c7a-9aae-2c4c8594cea4 | cpp | tensorflow/tensorflow | allocation | tensorflow/compiler/mlir/lite/allocation.cc | tensorflow/lite/allocation_test.cc | #include "tensorflow/compiler/mlir/lite/allocation.h"
#include <stddef.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
namespace tflite {
#ifndef TFLITE_MCU
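// Reads the entire contents of `filename` into an owned buffer; valid() is false if any step fails.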
FileCopyAllocation::FileCopyAllocation(const char* filename,
ErrorReporter* error_reporter)
: Allocation(error_reporter, Allocation::Type::kFileCopy) {
std::unique_ptr<FILE, decltype(&fclose)> file(fopen(filename, "rb"), fclose);
if (!file) {
error_reporter_->Report("Could not open '%s'.", filename);
return;
}
struct stat sb;
#ifdef _WIN32
#define FILENO(_x) _fileno(_x)
#else
#define FILENO(_x) fileno(_x)
#endif
if (fstat(FILENO(file.get()), &sb) != 0) {
error_reporter_->Report("Failed to get file size of '%s'.", filename);
return;
}
#undef FILENO
buffer_size_bytes_ = sb.st_size;
std::unique_ptr<char[]> buffer(new char[buffer_size_bytes_]);
if (!buffer) {
error_reporter_->Report("Malloc of buffer to hold copy of '%s' failed.",
filename);
return;
}
size_t bytes_read =
fread(buffer.get(), sizeof(char), buffer_size_bytes_, file.get());
if (bytes_read != buffer_size_bytes_) {
error_reporter_->Report("Read of '%s' failed (too few bytes read).",
filename);
return;
}
copied_buffer_.reset(const_cast<char const*>(buffer.release()));
}
FileCopyAllocation::~FileCopyAllocation() {}
const void* FileCopyAllocation::base() const { return copied_buffer_.get(); }
size_t FileCopyAllocation::bytes() const { return buffer_size_bytes_; }
bool FileCopyAllocation::valid() const { return copied_buffer_ != nullptr; }
#endif
MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
ErrorReporter* error_reporter)
: Allocation(error_reporter, Allocation::Type::kMemory) {
#ifdef __arm__
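  // On 32-bit ARM the supplied buffer must be 4-byte aligned; otherwise the
  // allocation is marked invalid.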
if ((reinterpret_cast<uintptr_t>(ptr) & 0x3) != 0) {
TF_LITE_REPORT_ERROR(error_reporter,
"The supplied buffer is not 4-bytes aligned");
buffer_ = nullptr;
buffer_size_bytes_ = 0;
return;
}
#endif
#if defined(__x86_64__) && defined(UNDEFINED_BEHAVIOR_SANITIZER)
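  // Under UBSan, copy an unaligned buffer into a 4-byte-aligned allocation so
  // later accesses do not trip alignment checks.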
if ((reinterpret_cast<uintptr_t>(ptr) & 0x3) != 0) {
aligned_ptr_ = ::aligned_alloc(4, num_bytes);
if (aligned_ptr_ == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter, "Failed to allocate aligned buffer");
buffer_ = nullptr;
buffer_size_bytes_ = 0;
return;
}
memcpy(aligned_ptr_, ptr, num_bytes);
buffer_ = aligned_ptr_;
} else {
buffer_ = ptr;
}
#else
buffer_ = ptr;
#endif
buffer_size_bytes_ = num_bytes;
}
MemoryAllocation::~MemoryAllocation() {
#if defined(__x86_64__) && defined(UNDEFINED_BEHAVIOR_SANITIZER)
if (aligned_ptr_) {
free(aligned_ptr_);
}
#endif
}
const void* MemoryAllocation::base() const { return buffer_; }
size_t MemoryAllocation::bytes() const { return buffer_size_bytes_; }
bool MemoryAllocation::valid() const { return buffer_ != nullptr; }
} | #include "tensorflow/lite/allocation.h"
#if defined(__linux__)
#include <fcntl.h>
#endif
#include <sys/stat.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace tflite {
TEST(MMAPAllocation, TestInvalidFile) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation("/tmp/tflite_model_1234", &error_reporter);
EXPECT_FALSE(allocation.valid());
}
TEST(MMAPAllocation, TestValidFile) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation(
"tensorflow/lite/testdata/empty_model.bin", &error_reporter);
ASSERT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
}
#if defined(__linux__)
TEST(MMAPAllocation, TestInvalidFileDescriptor) {
if (!MMAPAllocation::IsSupported()) {
return;
}
TestErrorReporter error_reporter;
MMAPAllocation allocation(-1, &error_reporter);
EXPECT_FALSE(allocation.valid());
}
TEST(MMAPAllocation, TestInvalidSizeAndOffset) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
struct stat fd_stat;
ASSERT_EQ(fstat(fd, &fd_stat), 0);
size_t file_size = fd_stat.st_size;
TestErrorReporter error_reporter;
  MMAPAllocation allocation_invalid_offset(fd, /*offset=*/file_size + 100,
                                           /*length=*/1, &error_reporter);
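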
EXPECT_FALSE(allocation_invalid_offset.valid());
  MMAPAllocation allocation_invalid_length(fd, /*offset=*/0, /*length=*/0,
                                           &error_reporter);
EXPECT_FALSE(allocation_invalid_length.valid());
  MMAPAllocation allocation_excessive_length(fd, /*offset=*/0,
                                             /*length=*/file_size + 1,
                                             &error_reporter);
EXPECT_FALSE(allocation_excessive_length.valid());
  MMAPAllocation allocation_excessive_length_with_offset(
      fd, /*offset=*/10, /*length=*/file_size, &error_reporter);
EXPECT_FALSE(allocation_excessive_length_with_offset.valid());
close(fd);
}
TEST(MMAPAllocation, TestValidFileDescriptor) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
TestErrorReporter error_reporter;
MMAPAllocation allocation(fd, &error_reporter);
EXPECT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
close(fd);
}
TEST(MMAPAllocation, TestValidFileDescriptorWithOffset) {
if (!MMAPAllocation::IsSupported()) {
return;
}
int fd =
open("tensorflow/lite/testdata/empty_model.bin", O_RDONLY);
ASSERT_GT(fd, 0);
struct stat fd_stat;
ASSERT_EQ(fstat(fd, &fd_stat), 0);
size_t file_size = fd_stat.st_size;
TestErrorReporter error_reporter;
  MMAPAllocation allocation(fd, /*offset=*/10, /*length=*/file_size - 10,
                            &error_reporter);
EXPECT_TRUE(allocation.valid());
EXPECT_GT(allocation.fd(), 0);
EXPECT_GT(allocation.bytes(), 0);
EXPECT_NE(allocation.base(), nullptr);
close(fd);
}
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/allocation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/allocation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f7e1e07-e422-4212-9406-7b918c7a1893 | cpp | tensorflow/tensorflow | simulator | third_party/xla/xla/service/memory_space_assignment/simulator.cc | third_party/xla/xla/service/memory_space_assignment/simulator_test.cc | #include "xla/service/memory_space_assignment/simulator.h"
#include <algorithm>
#include <cstdint>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/layout.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
void RuntimeSimulator::InitializeAlternateMemoryMap(
const AllocationSequence& allocations) {
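  // Record, per instruction, which output shape indices and which
  // (operand number, shape index) uses are placed in alternate memory.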
outputs_in_alternate_memory_map_.clear();
operands_in_alternate_memory_map_.clear();
for (auto& allocation : allocations) {
if (!allocation->is_copy_allocation()) {
if (allocation->memory_space() == MemorySpace::kAlternate) {
const HloInstruction* defining_instruction =
allocation->defining_position().instruction;
outputs_in_alternate_memory_map_[defining_instruction].push_back(
allocation->defining_position().index);
}
}
for (auto& hlo_use : allocation->uses()) {
const HloInstruction* use_instruction = hlo_use.instruction;
operands_in_alternate_memory_map_[use_instruction].push_back(
std::make_pair(hlo_use.operand_number, hlo_use.operand_index));
}
}
}
float RuntimeSimulator::SimulateElapsedTimeWithoutAsyncCopyLikes(
const HloLiveRange& hlo_live_range, const AllocationSequence& allocations) {
InitializeAlternateMemoryMap(allocations);
const auto& instruction_sequence =
hlo_live_range.flattened_instruction_sequence().instructions();
float total_elapsed = 0.0;
for (const HloInstruction* instruction : instruction_sequence) {
if (instruction->opcode() == HloOpcode::kWhile) {
continue;
}
absl::Span<const ShapeIndex> outputs_in_alternate_memory;
auto output_it = outputs_in_alternate_memory_map_.find(instruction);
if (output_it != outputs_in_alternate_memory_map_.end()) {
outputs_in_alternate_memory = absl::MakeSpan(output_it->second);
}
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory;
auto operand_it = operands_in_alternate_memory_map_.find(instruction);
if (operand_it != operands_in_alternate_memory_map_.end()) {
operands_in_alternate_memory = absl::MakeSpan(operand_it->second);
}
float instruction_elapsed_per_invoke =
cost_analysis_->GetInstructionElapsedInAlternateMemory(
*instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
float total_trip_count = cost_analysis_->CalculateNestTripCount(
instruction, &cost_analysis_cache_);
total_elapsed += total_trip_count * instruction_elapsed_per_invoke;
}
return total_elapsed;
}
bool IsAsyncCopyLikeStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyStart ||
(instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice);
}
bool IsAsyncCopyLikeDone(const HloInstruction* instruction) {
return (instruction->opcode() == HloOpcode::kCopyDone ||
(instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice));
}
MemoryTransferDirection GetAsyncCopyLikeDirection(
const HloInstruction* async_copy_like_start,
int64_t alternate_memory_space) {
CHECK(IsAsyncCopyLikeStart(async_copy_like_start));
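  // Infer the transfer direction from the memory spaces of the copy-like
  // start's operand and of the corresponding done instruction's result.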
int64_t operand_memory_space =
async_copy_like_start->operand(0)->shape().layout().memory_space();
std::optional<int64_t> output_memory_space;
for (const HloInstruction* user : async_copy_like_start->users()) {
if (user->opcode() == HloOpcode::kCopyDone ||
user->opcode() == HloOpcode::kAsyncDone) {
output_memory_space.emplace(user->shape().layout().memory_space());
break;
}
}
if (!output_memory_space.has_value()) {
return MemoryTransferDirection::kUnsupported;
}
if (operand_memory_space == xla::Layout::kDefaultMemorySpace &&
output_memory_space == alternate_memory_space) {
return MemoryTransferDirection::kDefaultToAlternate;
}
if (operand_memory_space == alternate_memory_space &&
output_memory_space == xla::Layout::kDefaultMemorySpace) {
return MemoryTransferDirection::kAlternateToDefault;
}
return MemoryTransferDirection::kUnsupported;
}
const std::list<OutstandingAsyncCopyLike>&
RuntimeSimulator::GetOutstandingReadDefaultQueue() const {
return outstanding_read_default_queue_;
}
const std::list<OutstandingAsyncCopyLike>&
RuntimeSimulator::GetOutstandingWriteDefaultQueue() const {
return outstanding_write_default_queue_;
}
const HloInstruction* RuntimeSimulator::RemoveBytesFromQueueIfNotEmpty(
std::list<OutstandingAsyncCopyLike>& async_copy_like_queue,
float processed_bytes) {
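  // Deduct processed_bytes from the transfer at the front of the queue; if it
  // finishes, pop it and return its copy-like start instruction.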
if (async_copy_like_queue.empty()) return nullptr;
CHECK_GE(async_copy_like_queue.front().remaining_bytes_to_transfer,
processed_bytes);
async_copy_like_queue.front().remaining_bytes_to_transfer -= processed_bytes;
if (async_copy_like_queue.front().remaining_bytes_to_transfer == 0.0) {
const HloInstruction* retired_instruction =
async_copy_like_queue.front().copy_like_start_inst;
async_copy_like_queue.pop_front();
return retired_instruction;
}
return nullptr;
}
float RuntimeSimulator::SimulateAsyncCopyLikeDone(
const HloInstruction* copy_like_done_instruction) {
const HloInstruction* copy_like_start_instruction =
copy_like_done_instruction->operand(0);
MemoryTransferDirection direction = GetAsyncCopyLikeDirection(
copy_like_start_instruction, alternate_memory_space_);
if (direction == MemoryTransferDirection::kUnsupported) {
LOG(WARNING) << "Unsupported memory transfer direction for copy-done: "
<< copy_like_done_instruction->ToString();
return 0.0;
}
std::list<OutstandingAsyncCopyLike>& same_direction_queue =
direction == MemoryTransferDirection::kDefaultToAlternate
? outstanding_read_default_queue_
: outstanding_write_default_queue_;
std::list<OutstandingAsyncCopyLike>& opposite_direction_queue =
direction == MemoryTransferDirection::kDefaultToAlternate
? outstanding_write_default_queue_
: outstanding_read_default_queue_;
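  // If the copy-like start is no longer queued, its transfer already finished
  // during previously simulated time, so the done instruction adds no time.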
if (absl::c_find_if(same_direction_queue,
[&](const OutstandingAsyncCopyLike& async_copy_like) {
return async_copy_like.copy_like_start_inst ==
copy_like_start_instruction;
}) == same_direction_queue.end()) {
return 0.0;
}
float elapsed_time = 0.0;
const HloInstruction* retired_instruction_in_same_direction_queue = nullptr;
do {
float bytes_to_process =
same_direction_queue.front().remaining_bytes_to_transfer;
float available_bandwidth = cost_analysis_->base_costs().BytesPerSecond();
if (!opposite_direction_queue.empty()) {
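      // Reads and writes share the default-memory bandwidth, so each direction
      // only gets half of it while both queues are busy.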
available_bandwidth *= 0.5;
bytes_to_process = std::min(
bytes_to_process,
opposite_direction_queue.front().remaining_bytes_to_transfer);
}
elapsed_time += bytes_to_process / available_bandwidth;
RemoveBytesFromQueueIfNotEmpty(opposite_direction_queue, bytes_to_process);
retired_instruction_in_same_direction_queue =
RemoveBytesFromQueueIfNotEmpty(same_direction_queue, bytes_to_process);
} while (retired_instruction_in_same_direction_queue !=
copy_like_start_instruction);
return elapsed_time;
}
float RuntimeSimulator::SimulateComputeInstruction(
const HloInstruction* instruction,
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory,
absl::Span<const ShapeIndex> outputs_in_alternate_memory) {
float default_memory_idle_time =
cost_analysis_->GetDefaultMemoryBandwidthIdleTime(
*instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
ProcessAsyncCopyLikesInIdleTime(default_memory_idle_time);
float inst_elapsed = cost_analysis_->GetInstructionElapsedInAlternateMemory(
*instruction, operands_in_alternate_memory, outputs_in_alternate_memory);
return inst_elapsed;
}
void RuntimeSimulator::ProcessAsyncCopyLikesInIdleTime(float time) {
if (time <= 0.0) {
return;
}
float remaining_simulation_time = time;
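  // Spend the given idle time making progress on outstanding async copies,
  // front of each queue first.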
while ((!outstanding_read_default_queue_.empty() ||
!outstanding_write_default_queue_.empty()) &&
remaining_simulation_time > 0.0) {
float available_bandwidth = cost_analysis_->base_costs().BytesPerSecond();
if (!outstanding_read_default_queue_.empty() &&
!outstanding_write_default_queue_.empty()) {
available_bandwidth *= 0.5;
}
float bytes_to_process = available_bandwidth * remaining_simulation_time;
if (!outstanding_read_default_queue_.empty()) {
bytes_to_process = std::min(
bytes_to_process,
outstanding_read_default_queue_.front().remaining_bytes_to_transfer);
}
if (!outstanding_write_default_queue_.empty()) {
bytes_to_process = std::min(
bytes_to_process,
outstanding_write_default_queue_.front().remaining_bytes_to_transfer);
}
float real_elapsed_time = bytes_to_process / available_bandwidth;
remaining_simulation_time -= real_elapsed_time;
RemoveBytesFromQueueIfNotEmpty(outstanding_read_default_queue_,
bytes_to_process);
RemoveBytesFromQueueIfNotEmpty(outstanding_write_default_queue_,
bytes_to_process);
}
}
float RuntimeSimulator::SimulateElapsedTime(
const HloModule* hlo_module, const AllocationSequence& allocations) {
InitializeAlternateMemoryMap(allocations);
std::unique_ptr<xla::HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(hlo_module).value();
std::unique_ptr<HloLiveRange> hlo_live_range =
HloLiveRange::Run(hlo_module->schedule(), *alias_analysis,
hlo_module->entry_computation())
.value();
CHECK_GT(cost_analysis_->base_costs().BytesPerSecond(), 0.0);
float total_elapsed = 0.0;
const auto& instruction_sequence =
hlo_live_range->flattened_instruction_sequence().instructions();
for (const HloInstruction* instruction : instruction_sequence) {
float inst_elapsed = 0.0;
if (instruction->opcode() == HloOpcode::kWhile) {
continue;
}
if (instruction->parent()->IsAsyncComputation()) {
continue;
}
if (IsAsyncCopyLikeStart(instruction)) {
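      // A copy-like start only enqueues the transfer; its elapsed time is
      // charged when the matching done instruction is simulated.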
MemoryTransferDirection direction =
GetAsyncCopyLikeDirection(instruction, alternate_memory_space_);
const Shape& transfer_shape =
(instruction->opcode() == HloOpcode::kCopyStart)
? instruction->operand(0)->shape()
: ShapeUtil::GetSubshape(instruction->shape(),
{1});
float transfer_bytes = static_cast<float>(
cost_analysis_->base_costs().GetShapeSize(transfer_shape));
if (direction == MemoryTransferDirection::kDefaultToAlternate) {
outstanding_read_default_queue_.push_back(
OutstandingAsyncCopyLike{instruction, transfer_bytes});
} else if (direction == MemoryTransferDirection::kAlternateToDefault) {
outstanding_write_default_queue_.push_back(
OutstandingAsyncCopyLike{instruction, transfer_bytes});
} else {
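        // Transfers in an unsupported direction are not tracked.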
}
} else if (IsAsyncCopyLikeDone(instruction)) {
inst_elapsed = SimulateAsyncCopyLikeDone(instruction);
} else {
absl::Span<const ShapeIndex> outputs_in_alternate_memory;
auto output_it = outputs_in_alternate_memory_map_.find(instruction);
if (output_it != outputs_in_alternate_memory_map_.end()) {
outputs_in_alternate_memory = absl::MakeSpan(output_it->second);
}
absl::Span<const std::pair<int64_t, ShapeIndex>>
operands_in_alternate_memory;
auto operand_it = operands_in_alternate_memory_map_.find(instruction);
      if (operand_it != operands_in_alternate_memory_map_.end()) {
        operands_in_alternate_memory = absl::MakeSpan(operand_it->second);
      }
inst_elapsed =
SimulateComputeInstruction(instruction, operands_in_alternate_memory,
outputs_in_alternate_memory);
}
if (inst_elapsed > 0.0) {
float total_trip_count = cost_analysis_->CalculateNestTripCount(
instruction, &cost_analysis_cache_);
total_elapsed += inst_elapsed * total_trip_count;
}
}
return total_elapsed;
}
}
} | #include "xla/service/memory_space_assignment/simulator.h"
#include <cstdint>
#include <list>
#include <memory>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
using memory_space_assignment::RuntimeSimulator;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
constexpr int64_t kPointerSize = 8;
constexpr int64_t kAlternateMemorySpace = 1;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class MemorySpaceAssignmentSimulatorTest : public HloTestBase {
protected:
absl::Status Initialize(absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(module_, ParseAndReturnVerifiedModule(hlo_string));
for (HloInstruction* inst : module_->entry_computation()->instructions()) {
instruction_map_[inst->name()] = inst;
if (inst->shape().has_layout() &&
inst->shape().layout().memory_space() == kAlternateMemorySpace) {
std::unique_ptr<xla::memory_space_assignment::Allocation> allocation =
std::make_unique<memory_space_assignment::PinnedAllocation>(
HloPosition{inst, {}},
memory_space_assignment::MemorySpace::kAlternate,
HeapSimulator::Chunk::FromOffsetSize(-1, -1),
0,
1, false);
for (HloInstruction* user : inst->users()) {
allocation->AddUse(HloUse{user, 0});
}
allocations_.push_back(std::move(allocation));
}
}
HloCostAnalysis::Options tpu_device_options;
tpu_device_options.shape_size = ShapeSize;
tpu_device_options.set_flops_per_second(1);
tpu_device_options.set_bytes_per_second(1);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(tpu_device_options);
TF_RETURN_IF_ERROR(
module_->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
*hlo_cost_analysis_);
CostAnalysisOptions _options;
_options.alternate_mem_bandwidth_bytes_per_second = 2;
TF_ASSIGN_OR_RETURN(
cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_, _options, *module_));
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module_.get()));
TF_ASSIGN_OR_RETURN(hlo_live_range_,
HloLiveRange::Run(module_->schedule(), *alias_analysis_,
module_->entry_computation()));
runtime_simulator_ = std::make_unique<RuntimeSimulator>(
cost_analysis_.get(), kAlternateMemorySpace);
return absl::OkStatus();
}
absl::flat_hash_map<std::string_view, const HloInstruction*> instruction_map_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
memory_space_assignment::AllocationSequence allocations_;
std::unique_ptr<RuntimeSimulator> runtime_simulator_;
std::unique_ptr<HloModule> module_;
};
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleLayerLoop) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
%body {
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%condition {
%param = (s32[]) parameter(0)
%constant.42 = s32[] constant(42)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.42, s32[] %condition_input), direction=GT
}
ENTRY Entry {
%dummy_input = s32[] parameter(0)
%constant.0 = s32[] constant(0)
ROOT %while = (s32[]) while(tuple(%constant.0)), condition=%condition, body=%body
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
1226);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
1226);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, NestedLayerLoop) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
%inner.body {
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%inner.condition {
%param = (s32[]) parameter(0)
%constant.42 = s32[] constant(42)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.42, s32[] %condition_input), direction=GT
}
%outer.body {
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
%param = (s32[]) parameter(0)
%inner_while = (s32[]) while(tuple(%constant.0)), condition=%inner.condition, body=%inner.body
%count = s32[] get-tuple-element(%param), index=0
%increment = s32[] add(s32[] %count, s32[] %constant.1)
ROOT %loop_result = (s32[]) tuple(%increment)
}
%outer.condition {
%param = (s32[]) parameter(0)
%constant.27 = s32[] constant(27)
%condition_input = s32[] get-tuple-element(%param), index=0
ROOT %greater = pred[] compare(s32[] %constant.27, s32[] %condition_input), direction=GT
}
ENTRY Entry {
%constant.0 = s32[] constant(0)
ROOT %while_outer = (s32[]) while(tuple(%constant.0)), condition=%outer.condition, body=%outer.body
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
33893);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
33893);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleAsyncCopyOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[1,1,1024,2048] parameter(0)
copy-start.1 = (f32[1,1,1024,2048]{0,1,2,3:S(1)}, f32[1,1,1024,2048], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[1,1,1024,2048]{0,1,2,3:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
memory_space_assignment::AllocationSequence allocations;
EXPECT_EQ(runtime_simulator_->SimulateElapsedTimeWithoutAsyncCopyLikes(
*hlo_live_range_, allocations_),
0);
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
8388608);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, AsyncCopyWithComputationOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[8] parameter(0)
param_1 = f32[2] parameter(1)
copy-start.1 = (f32[8]{0:S(1)}, f32[8], u32[]) copy-start(param_0)
neg_compute = f32[2] negate(param_1)
ROOT copy-done.1 = f32[8]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_), 48);
}
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleAsyncSliceCopyOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[3072,2048] parameter(0)
slice-start = ((f32[3072,2048]), f32[768,2048]{1,0:S(1)}, s32[]) slice-start(f32[3072,2048] param_0), slice={[1536:2304], [0:2048]}
ROOT slice-done = f32[768,2048]{1,0:T(8,128)S(1)} slice-done(((f32[3072,2048]), f32[768,2048]{1,0:S(1)}, s32[]) slice-start)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
memory_space_assignment::AllocationSequence allocations;
float expected_elapsed_time = 6291456;
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
expected_elapsed_time);
}
TEST_F(MemorySpaceAssignmentSimulatorTest,
AsyncCopyAndAsyncSliceAndComputeOverhead) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[2048] parameter(0)
param_1 = f32[64] parameter(1)
param_2 = f32[128] parameter(2)
slice-start = ((f32[2048]), f32[64]{0:S(1)}, s32[]) slice-start(f32[2048] param_0), slice={[0:64]}
copy-start = (f32[64]{0:S(1)}, f32[64], u32[]) copy-start(f32[64] param_1)
slice-done = f32[64]{0:S(1)} slice-done(((f32[2048]), f32[64]{0:S(1)}, s32[]) slice-start)
copy-done = f32[64]{0:S(1)} copy-done(copy-start)
copy-start-overlap = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(f32[128] param_2)
add = f32[64]{0:S(1)} add(slice-done, copy-done)
ROOT copy-done-overlap = f32[128]{0:S(1)} copy-done(copy-start-overlap)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
EXPECT_EQ(
runtime_simulator_->SimulateElapsedTime(module_.get(), allocations_),
1024);
}
class SimulateAsyncCopyLikeDoneTest
: public MemorySpaceAssignmentSimulatorTest {
protected:
absl::Status Initialize(absl::string_view hlo_string) {
TF_RETURN_IF_ERROR(
MemorySpaceAssignmentSimulatorTest::Initialize(hlo_string));
if (instruction_map_.contains("copy-start.1")) {
outstanding_read_default_queue_.push_back(
memory_space_assignment::OutstandingAsyncCopyLike{
instruction_map_["copy-start.1"], 512});
}
if (instruction_map_.contains("copy-start.2")) {
outstanding_write_default_queue_.push_back(
memory_space_assignment::OutstandingAsyncCopyLike{
instruction_map_["copy-start.2"], 128});
}
runtime_simulator_ = std::make_unique<RuntimeSimulator>(
cost_analysis_.get(), kAlternateMemorySpace,
outstanding_read_default_queue_, outstanding_write_default_queue_);
return absl::OkStatus();
}
std::list<memory_space_assignment::OutstandingAsyncCopyLike>
outstanding_read_default_queue_;
std::list<memory_space_assignment::OutstandingAsyncCopyLike>
outstanding_write_default_queue_;
};
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyAlreadyCompleted) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_done_inst = instruction_map_["copy-done.1"];
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
float elapsed_time_for_completed_copy =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_EQ(elapsed_time_for_completed_copy, 0);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyFullBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_done_inst = instruction_map_["copy-done.1"];
float copy_done_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_inst);
EXPECT_EQ(copy_done_elapsed_time, 512);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopySharedBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_done_2_inst = instruction_map_["copy-done.2"];
float copy_done_2_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_2_inst);
EXPECT_EQ(copy_done_2_elapsed_time, 256);
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 384}}));
}
TEST_F(SimulateAsyncCopyLikeDoneTest, AsyncCopyTransferPartialProcess) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_done_1_inst = instruction_map_["copy-done.1"];
const HloInstruction* copy_done_2_inst = instruction_map_["copy-done.2"];
float copy_done_2_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_2_inst);
EXPECT_EQ(copy_done_2_elapsed_time, 256);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 384}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
float copy_done_1_elapsed_time =
runtime_simulator_->SimulateAsyncCopyLikeDone(copy_done_1_inst);
EXPECT_EQ(copy_done_1_elapsed_time, 384);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithSingleAsyncCopy) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32] parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
neg = f32[32] negate(param_1)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* neg_inst = instruction_map_["neg"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
      neg_inst, /*operands_in_alternate_memory=*/{},
      /*outputs_in_alternate_memory=*/{});
EXPECT_EQ(compute_elapsed_time, 256);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 512}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithSharedBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
copy-start.2 = (f32[32], f32[32]{0:S(1)}, u32[]) copy-start(param_1)
neg = f32[32] negate(param_1)
copy-done.2 = f32[32] copy-done(copy-start.2)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
const HloInstruction* copy_start_2_inst = instruction_map_["copy-start.2"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {{0, {}}},
{});
EXPECT_EQ(compute_elapsed_time, 192);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 480}}));
EXPECT_THAT(
runtime_simulator_->GetOutstandingWriteDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_2_inst, 96}}));
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithFullBandwidth) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
param_1 = f32[32]{0:S(1)} parameter(1)
copy-start.1 = (f32[128]{0:S(1)}, f32[128], u32[]) copy-start(param_0)
neg = f32[32] negate(param_1)
ROOT copy-done.1 = f32[128]{0:S(1)} copy-done(copy-start.1)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
const HloInstruction* copy_start_1_inst = instruction_map_["copy-start.1"];
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {{0, {}}},
{});
EXPECT_EQ(compute_elapsed_time, 192);
EXPECT_THAT(
runtime_simulator_->GetOutstandingReadDefaultQueue(),
ElementsAreArray({memory_space_assignment::OutstandingAsyncCopyLike{
copy_start_1_inst, 448}}));
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
TEST_F(SimulateAsyncCopyLikeDoneTest,
SimulateComputeInstructionWithEmptyQueues) {
absl::string_view hlo_string =
R"(HloModule module, is_scheduled=true
ENTRY Entry {
param_0 = f32[128] parameter(0)
ROOT neg = f32[128] negate(param_0)
}
)";
TF_ASSERT_OK(Initialize(hlo_string));
float compute_elapsed_time = runtime_simulator_->SimulateComputeInstruction(
instruction_map_["neg"], {},
{});
EXPECT_EQ(compute_elapsed_time, 1024);
EXPECT_THAT(runtime_simulator_->GetOutstandingReadDefaultQueue(), IsEmpty());
EXPECT_THAT(runtime_simulator_->GetOutstandingWriteDefaultQueue(), IsEmpty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/simulator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/simulator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9f10a84-8072-4a0b-badf-15411ee7d548 | cpp | tensorflow/tensorflow | cost_analysis | tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.cc | third_party/xla/xla/service/memory_space_assignment/cost_analysis_test.cc | #include "tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tfrt/constants.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tfrt/compiler/opdefs/tfrt_op_interfaces.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
constexpr int64_t kDefaultCheapCost = 1;
int64_t GetRankedTensorSize(mlir::TensorType type) {
auto shape = type.getShape();
int64_t size = 1;
for (int64_t dim : shape) {
size *= std::max(kDefaultCheapCost, dim);
}
return size;
}
int64_t InferTensorSize(const CostContext& context, mlir::TensorType type) {
if (type.hasRank()) return GetRankedTensorSize(type);
return context.default_unranked_tensor_size;
}
int64_t InferLookupTableFindV2Cost(const CostContext& context,
mlir::TF::LookupTableFindV2Op op) {
constexpr int64_t kLookupTableFindCostScale = 8;
constexpr int64_t kLookupTableFindStringKeyCostScale = 16;
auto value_type = mlir::cast<mlir::TensorType>(op.getValues().getType());
auto key_type = mlir::cast<mlir::TensorType>(op.getKeys().getType());
int64_t output_size = InferTensorSize(context, value_type);
int64_t cost = kLookupTableFindCostScale * output_size;
if (mlir::isa<mlir::TF::StringType>(key_type.getElementType()))
cost *= kLookupTableFindStringKeyCostScale;
return cost;
}
int64_t InferGatherV2Cost(const CostContext& context, mlir::TF::GatherV2Op op) {
return InferTensorSize(
context, mlir::cast<mlir::TensorType>(op.getOutput().getType()));
}
template <typename OpType>
int64_t InferSparseSegmentOpCost(const CostContext& context, OpType op) {
return InferTensorSize(
context, mlir::cast<mlir::TensorType>(op.getOutput().getType()));
}
using CostFunctionRegistry = absl::flat_hash_map<std::string, CostFunction>;
void RegisterCostFunction(CostFunctionRegistry& registry,
absl::string_view op_name,
CostFunction cost_function) {
auto r = registry.try_emplace(op_name, std::move(cost_function));
assert(r.second);
(void)r;
}
template <typename OpType, typename F>
void RegisterCostFunction(CostFunctionRegistry& registry, F f) {
RegisterCostFunction(
registry, OpType::getOperationName().str(),
[f = std::move(f)](const CostContext& context, mlir::Operation* op) {
return f(context, llvm::cast<OpType>(op));
});
}
CostFunctionRegistry& GetCostFunctionRegistry() {
static auto* const registry = []() {
auto* registry = new CostFunctionRegistry;
RegisterCostFunction<mlir::TF::GatherV2Op>(*registry, InferGatherV2Cost);
RegisterCostFunction<mlir::TF::SparseSegmentSumOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentSumOp>);
RegisterCostFunction<mlir::TF::SparseSegmentMeanOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentMeanOp>);
RegisterCostFunction<mlir::TF::SparseSegmentSqrtNOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentSqrtNOp>);
RegisterCostFunction<mlir::TF::LookupTableFindV2Op>(
*registry, InferLookupTableFindV2Cost);
return registry;
}();
return *registry;
}
}
void RegisterCostFunction(absl::string_view op_name,
CostFunction cost_function) {
RegisterCostFunction(GetCostFunctionRegistry(), op_name,
std::move(cost_function));
}
bool HasCostFunctionRegistered(absl::string_view op_name) {
return GetCostFunctionRegistry().contains(op_name);
}
int64_t CostAnalysis::GetCost(mlir::Operation* op) const {
assert(cost_map_.count(op) > 0);
return cost_map_.lookup(op);
}
void CostAnalysis::AnalyzeArguments(mlir::func::FuncOp func_op) {
for (auto arg : func_op.getArguments()) {
if (!mlir::isa<mlir::TensorType>(arg.getType())) continue;
auto type = mlir::cast<mlir::TensorType>(arg.getType());
if (type.hasRank()) {
max_arg_size_ = std::max(max_arg_size_, GetRankedTensorSize(type));
}
}
}
void CostAnalysis::AnalyzeBlock(mlir::Block* block) {
for (auto& op : *block) {
EvaluateCost(&op);
}
}
void CostAnalysis::EvaluateCost(mlir::Operation* op) {
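  // Cost resolution order: an op-provided cost interface, a default for
  // non-TF-dialect ops, a registered per-op cost function, a recorded runtime
  // cost, a fixed cheap cost for known cheap ops, then an operand-size
  // heuristic.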
if (auto cost_function =
mlir::dyn_cast<tfrt::compiler::CostFunctionInterface>(op)) {
cost_map_[op] = cost_function.cost();
return;
}
if (!llvm::isa<mlir::TF::TensorFlowDialect>(op->getDialect())) {
cost_map_[op] = max_arg_size_;
return;
}
const auto& registry = GetCostFunctionRegistry();
absl::string_view op_name = op->getName().getStringRef();
auto iter = registry.find(op_name);
if (iter != registry.end()) {
CostContext context;
context.default_unranked_tensor_size = max_arg_size_;
cost_map_[op] = iter->second(context, op);
return;
}
if (cost_recorder_ != nullptr) {
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (op_key_attr) {
cost_map_[op] = cost_recorder_->GetCost(op_key_attr.getInt());
return;
}
}
if (llvm::isa<mlir::TF::ShapeOp, mlir::TF::StridedSliceOp,
mlir::TF::ReshapeOp, mlir::TF::ExpandDimsOp>(op)) {
cost_map_[op] = kDefaultCheapCost;
return;
}
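  // Fallback heuristic: cost grows with the total size of the op's ranked
  // tensor operands (unranked operands use the max argument size).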
int64_t cost = kDefaultCheapCost;
for (auto operand : op->getOperands()) {
auto type = mlir::cast<mlir::TensorType>(operand.getType());
if (type.hasRank()) {
cost += GetRankedTensorSize(type);
} else {
cost += max_arg_size_;
}
}
cost_map_[op] = cost;
}
}
} | #include "xla/service/memory_space_assignment/cost_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class MemorySpaceAssignmentCostAnalysisTest : public HloTestBase {
protected:
absl::Status Initialize(const HloModule* module,
float pipeline_overhead_window_size_mib = 0.0) {
HloCostAnalysis::Options options;
options_.alternate_mem_bandwidth_bytes_per_second = 128;
options_.async_copy_bandwidth_bytes_per_second = 32;
options_.pipeline_overhead_window_size_mib =
pipeline_overhead_window_size_mib;
options.shape_size = ShapeSize;
options.set_flops_per_second(8);
options.set_bytes_per_second(32);
options.set_transcendentals_per_second(16);
options.set_flops_min_latency_second(1);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
TF_RETURN_IF_ERROR(
module->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
*hlo_cost_analysis_);
TF_ASSIGN_OR_RETURN(
cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_, options_, *module));
return absl::OkStatus();
}
CostAnalysisOptions options_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
};
TEST_F(MemorySpaceAssignmentCostAnalysisTest, NoPipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(Initialize(module.get()));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed = std::max(
8.0f / 8.0f,
hlo_cost_analysis_->min_latency_seconds(HloCostAnalysis::kFlopsKey));
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed);
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
TEST_F(MemorySpaceAssignmentCostAnalysisTest, PipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(
Initialize(module.get(),
                 /*pipeline_overhead_window_size_mib=*/(64.0 / 1024 / 1024)));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed = std::max(
8.0f / 8.0f,
hlo_cost_analysis_->min_latency_seconds(HloCostAnalysis::kFlopsKey));
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
float expected_overhead = expected_compute_elapsed * 2 / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed + expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = expected_compute_elapsed / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(
cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = 0;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
TEST_F(MemorySpaceAssignmentCostAnalysisTest, LatencyBoundCompute) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,2] parameter(0)
param1 = f32[2,2] parameter(1)
ROOT add = f32[2,2] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(Initialize(module.get()));
const HloInstruction* add = module->entry_computation()->root_instruction();
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add), 1.0f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
69fb686e-9d31-45fd-992b-c343fb30b34f | cpp | tensorflow/tensorflow | prefetch_interval_picker | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker.cc | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker_test.cc | #include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace memory_space_assignment {
namespace {
const float kEvictionRetryMultiplier = 2.0;
const int kNumExploredDecreasingIntervals = 100;
}
bool InstructionCountPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
const Shape& shape, int64_t start_time, int64_t end_time) const {
return end_time - start_time <= max_overlap_count_;
}
int64_t InstructionCountPrefetchIntervalPicker::PreferredEvictionEndTime(
const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
return std::min(start_time + min_overlap_count_, latest_end_time);
}
int64_t InstructionCountPrefetchIntervalPicker::LatestPrefetchStartTime(
const Shape& shape, int64_t start_time, int64_t end_time,
const HloUse* use) const {
return end_time - min_overlap_count_;
}
int64_t InstructionCountPrefetchIntervalPicker::PreferredPrefetchStartTime(
const Shape& shape, int64_t earliest_prefetch_start_time,
int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
return std::max(earliest_prefetch_start_time,
prefetch_end_time - max_overlap_count_);
}
int64_t InstructionCountPrefetchIntervalPicker::EstimatedPrefetchEndTime(
const Shape& shape, int64_t start_time, int64_t end_time) const {
return end_time;
}
float InstructionCountPrefetchIntervalPicker::GetLogicalIntervalElapsed(
int64_t start_time, int64_t end_time) const {
return static_cast<float>(end_time - start_time - 1);
}
void InstructionCountPrefetchIntervalPicker::Begin(
const HloUse& use, int64_t start_time, int64_t end_time,
std::optional<int64_t> preferred_time) {
end_time_ = end_time;
const Shape& shape = ShapeUtil::GetSubshape(
use.instruction->operand(use.operand_number)->shape(), use.operand_index);
if (preferred_time) {
current_prefetch_time_ = *preferred_time;
} else {
current_prefetch_time_ =
PreferredPrefetchStartTime(shape, start_time, end_time, end_time);
}
}
int64_t InstructionCountPrefetchIntervalPicker::Next() {
CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
"Done() is false";
return current_prefetch_time_++;
}
bool InstructionCountPrefetchIntervalPicker::Done() const {
return end_time_ - current_prefetch_time_ <= min_overlap_count_;
}
int64_t InstructionCountPrefetchIntervalPicker::latest_time() const {
return end_time_ - min_overlap_count_ - 1;
}
std::string InstructionCountPrefetchIntervalPicker::ToDebugString() const {
return absl::StrCat("Overlapped HLOs = ", end_time_ - current_prefetch_time_);
}
std::string InstructionCountPrefetchIntervalPicker::ToNoCopyDebugString(
const Shape& shape, int64_t start_time, int64_t end_time) const {
return absl::StrCat("Overlapped HLOs = ", end_time - start_time);
}
CostAnalysisPrefetchIntervalPicker::CostAnalysisPrefetchIntervalPicker(
const CostAnalysis& cost_analysis, float min_overlap_to_async_copy_ratio,
float preferred_overlap_to_async_copy_ratio,
float max_overlap_to_mem_size_async_copy_ratio, int64_t mem_size_bytes,
const Shape* shape_override)
: while_nest_level_(
cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
computation_nest_level_(
cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
cost_analysis_(cost_analysis),
min_overlap_to_async_copy_ratio_(min_overlap_to_async_copy_ratio),
preferred_overlap_to_async_copy_ratio_(
preferred_overlap_to_async_copy_ratio),
max_async_copy_elapsed_(
cost_analysis_.GetAsyncCopyElapsed(
ShapeUtil::MakeShape(S32, {mem_size_bytes / 4})) *
max_overlap_to_mem_size_async_copy_ratio),
shape_override_(shape_override ? std::optional(*shape_override)
: std::nullopt) {
instruction_schedule_ =
&cost_analysis_.hlo_live_range().instruction_schedule();
std::vector<float> instructions_elapsed_time(
instruction_schedule_->size() + 1, 0.0);
int max_while_nest_level = 0;
for (const auto& instruction_and_logical_time : *instruction_schedule_) {
const HloInstruction* instruction = instruction_and_logical_time.first;
int64_t logical_time = instruction_and_logical_time.second;
if (logical_time >= instructions_elapsed_time.size()) {
instructions_elapsed_time.resize(logical_time + 1, 0.0);
while_nest_level_.resize(logical_time + 1, 0);
}
int while_nest_level = cost_analysis_.CalculateComputationNestLevel(
instruction_and_logical_time.first, true);
while_nest_level_[logical_time] = while_nest_level;
max_while_nest_level = std::max(max_while_nest_level, while_nest_level);
int computation_nest_level = cost_analysis_.CalculateComputationNestLevel(
instruction_and_logical_time.first, false);
computation_nest_level_[logical_time] = computation_nest_level;
if (instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kConditional) {
continue;
}
float elapsed_time = cost_analysis_.GetInstructionElapsed(
*instruction_and_logical_time.first);
instructions_elapsed_time[logical_time] =
elapsed_time * cost_analysis_.GetWhileNestMultiplier(while_nest_level);
}
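  // Build prefix sums of per-instruction elapsed times (already weighted by
  // while-loop trip counts).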
float cumsum = 0.0;
elapsed_time_cumsum_.reserve(instructions_elapsed_time.size());
for (float elapsed_time : instructions_elapsed_time) {
cumsum += elapsed_time;
elapsed_time_cumsum_.push_back(cumsum);
}
const int64_t size = instructions_elapsed_time.size();
CHECK_EQ(size, while_nest_level_.size());
std::vector<int> most_recent_by_level(while_nest_level_.size(), -1);
int prev_nest_level = 0;
int change_idx = -1;
while_nest_level_change_.reserve(size);
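  // For each schedule index, remember the most recent index that executed at a
  // shallower while-nest level (updated whenever the nest level changes).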
for (int i = 0; i < size; ++i) {
int nest_level = while_nest_level_[i];
if (nest_level != prev_nest_level) {
prev_nest_level = nest_level;
change_idx = -1;
for (int smaller_level = 0; smaller_level < nest_level; smaller_level++) {
change_idx = std::max(change_idx, most_recent_by_level[smaller_level]);
}
}
most_recent_by_level[nest_level] = i;
while_nest_level_change_.push_back(change_idx);
}
for (int i = 0; i <= max_while_nest_level; ++i) {
while_execution_counts_.push_back(cost_analysis_.GetWhileNestMultiplier(i));
}
}
float CostAnalysisPrefetchIntervalPicker::GetMaxElapsedInAlternateMemory(
float async_copy_elapsed) const {
return max_async_copy_elapsed_;
}
bool CostAnalysisPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
const Shape& shape, int64_t start_time, int64_t end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
float logical_interval_elapsed =
GetLogicalIntervalElapsed(start_time, end_time);
return GetMaxElapsedInAlternateMemory(async_copy_elapsed) >
logical_interval_elapsed;
}
int64_t CostAnalysisPrefetchIntervalPicker::PreferredEvictionEndTime(
const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
int64_t end_time;
for (end_time = start_time + 1; end_time <= latest_end_time; ++end_time) {
float logical_interval_elapsed =
GetLogicalIntervalElapsed(start_time, end_time);
if (logical_interval_elapsed >=
(1 + kEvictionRetryMultiplier * retry_number_) *
preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed) {
break;
}
}
return end_time;
}
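// Searches backward from end_time for the latest start that still leaves at
// least min_overlap_to_async_copy_ratio_ * copy-elapsed of overlap, counting
// the elapsed-time reduction the use gains from reading alternate memory and
// requiring the start to sit at the same computation nesting level as the end.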
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchStartTime(
const Shape& shape, int64_t start_time, int64_t end_time,
const HloUse* use) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
float inst_elapsed_reduction = 0.0f;
if (use) {
float elapsed_time =
cost_analysis_.GetInstructionElapsed(*use->instruction);
float elapsed_time_in_alternate_mem =
cost_analysis_.GetInstructionElapsedInAlternateMemory(
*use->instruction,
{std::make_pair(use->operand_number, use->operand_index)},
{});
inst_elapsed_reduction = elapsed_time - elapsed_time_in_alternate_mem;
}
int end_nest_level = computation_nest_level_[end_time];
float min_interval = min_overlap_to_async_copy_ratio_ * async_copy_elapsed;
int latest_prefetch_time;
for (latest_prefetch_time = end_time - 1;
latest_prefetch_time >= start_time &&
(computation_nest_level_[latest_prefetch_time] != end_nest_level ||
min_interval >
GetLogicalIntervalElapsed(latest_prefetch_time, end_time) +
inst_elapsed_reduction);
--latest_prefetch_time) {
}
return latest_prefetch_time;
}
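// Picks the start whose logical interval is closest to the preferred overlap
// (preferred_overlap_to_async_copy_ratio_ * copy-elapsed), restricted to
// times at the same computation nesting level as the prefetch end.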
int64_t CostAnalysisPrefetchIntervalPicker::PreferredPrefetchStartTime(
const Shape& shape, int64_t earliest_prefetch_start_time,
int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
int64_t preferred_prefetch_start_time = earliest_prefetch_start_time;
float preferred_interval =
preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed;
float best_interval = GetLogicalIntervalElapsed(earliest_prefetch_start_time,
prefetch_end_time);
int end_nest_level = computation_nest_level_[prefetch_end_time];
for (int64_t prefetch_start_time = earliest_prefetch_start_time + 1;
prefetch_start_time <= latest_prefetch_start_time;
++prefetch_start_time) {
float interval =
GetLogicalIntervalElapsed(prefetch_start_time, prefetch_end_time);
if (computation_nest_level_[prefetch_start_time] == end_nest_level &&
std::abs(preferred_interval - interval) <
std::abs(preferred_interval - best_interval)) {
best_interval = interval;
preferred_prefetch_start_time = prefetch_start_time;
}
}
return preferred_prefetch_start_time;
}
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchEndTime(
int64_t original_prefetch_end_time,
int64_t proposed_prefetch_end_time) const {
int64_t original_nest_level =
computation_nest_level_[original_prefetch_end_time];
int64_t new_prefetch_end_time;
for (new_prefetch_end_time = proposed_prefetch_end_time;
computation_nest_level_[new_prefetch_end_time] != original_nest_level;
--new_prefetch_end_time) {
}
return new_prefetch_end_time;
}
int64_t CostAnalysisPrefetchIntervalPicker::EstimatedPrefetchEndTime(
const Shape& shape, int64_t start_time, int64_t end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
int64_t estimated_end_time;
for (estimated_end_time = start_time + 1; estimated_end_time < end_time;
++estimated_end_time) {
float interval = GetLogicalIntervalElapsed(start_time, estimated_end_time);
if (interval >= async_copy_elapsed) {
break;
}
}
return estimated_end_time;
}
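// Prepares the search window for prefetching this use: computes the copy
// elapsed time, the earliest and latest feasible start times, and a starting
// point near the preferred overlap, then seeds the increasing/decreasing
// iterators consumed by Next().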
void CostAnalysisPrefetchIntervalPicker::Begin(
const HloUse& use, int64_t start_time, int64_t end_time,
std::optional<int64_t> preferred_time) {
const Shape& shape = ShapeUtil::GetSubshape(
use.instruction->operand(use.operand_number)->shape(), use.operand_index);
async_copy_elapsed_ = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
float elapsed_time = cost_analysis_.GetInstructionElapsed(*use.instruction);
float elapsed_time_in_alternate_mem =
cost_analysis_.GetInstructionElapsedInAlternateMemory(
*use.instruction,
{std::make_pair(use.operand_number, use.operand_index)},
{});
inst_elapsed_reduction_ = elapsed_time - elapsed_time_in_alternate_mem;
end_logical_time_ = end_time;
int end_nest_level = computation_nest_level_[end_logical_time_];
float min_interval = min_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
latest_prefetch_time_ =
LatestPrefetchStartTime(shape, start_time, end_time, &use);
float max_interval = GetMaxElapsedInAlternateMemory(async_copy_elapsed_);
for (earliest_prefetch_time_ = start_time;
earliest_prefetch_time_ < latest_prefetch_time_ &&
(computation_nest_level_[earliest_prefetch_time_] != end_nest_level ||
max_interval < GetLogicalIntervalElapsed(earliest_prefetch_time_,
end_logical_time_));
++earliest_prefetch_time_) {
}
if (earliest_prefetch_time_ > latest_prefetch_time_) {
increasing_prefetch_time_iterator_ = earliest_prefetch_time_;
decreasing_prefetch_time_iterator_ = latest_prefetch_time_;
CHECK(Done());
return;
}
int64_t starting_prefetch_time;
if (preferred_time && *preferred_time <= latest_prefetch_time_) {
starting_prefetch_time = *preferred_time;
} else {
starting_prefetch_time =
PreferredPrefetchStartTime(shape, earliest_prefetch_time_,
latest_prefetch_time_, end_logical_time_);
}
float preferred_interval =
preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
VLOG(4) << "Interval min/max/preferred = " << min_interval << " "
<< max_interval << " " << preferred_interval
<< " prefetch time earliest/latest/starting = "
<< earliest_prefetch_time_ << " " << latest_prefetch_time_ << " "
<< starting_prefetch_time;
increasing_prefetch_time_iterator_ = starting_prefetch_time;
decreasing_prefetch_time_iterator_ = starting_prefetch_time;
using_increasing_prefetch_time_iterator_ = true;
Next();
}
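// Yields candidate prefetch start times by alternating an increasing and a
// decreasing iterator around the starting time, skipping times at a different
// computation nesting level; once the increasing side is exhausted, the
// decreasing side advances by interval targets so that only about
// kNumExploredDecreasingIntervals earlier candidates are visited.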
int64_t CostAnalysisPrefetchIntervalPicker::Next() {
CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
"Done() is true";
if (using_increasing_prefetch_time_iterator_) {
int64_t prefetch_time = increasing_prefetch_time_iterator_++;
while (increasing_prefetch_time_iterator_ <= latest_prefetch_time_ &&
computation_nest_level_[increasing_prefetch_time_iterator_] !=
computation_nest_level_[end_logical_time_]) {
++increasing_prefetch_time_iterator_;
}
if (decreasing_prefetch_time_iterator_ >= earliest_prefetch_time_) {
using_increasing_prefetch_time_iterator_ = false;
}
return prefetch_time;
} else {
int64_t prefetch_time = decreasing_prefetch_time_iterator_--;
float next_target_interval_elapsed = 0;
if (increasing_prefetch_time_iterator_ > latest_prefetch_time_) {
next_target_interval_elapsed =
GetLogicalIntervalElapsed(prefetch_time, end_logical_time_) +
(GetLogicalIntervalElapsed(earliest_prefetch_time_,
end_logical_time_) /
kNumExploredDecreasingIntervals);
VLOG(3) << "Next target interval elapsed: "
<< next_target_interval_elapsed;
}
while (decreasing_prefetch_time_iterator_ >= earliest_prefetch_time_ &&
(computation_nest_level_[decreasing_prefetch_time_iterator_] !=
computation_nest_level_[end_logical_time_] ||
GetLogicalIntervalElapsed(decreasing_prefetch_time_iterator_,
end_logical_time_) <
next_target_interval_elapsed)) {
--decreasing_prefetch_time_iterator_;
}
if (increasing_prefetch_time_iterator_ <= latest_prefetch_time_) {
using_increasing_prefetch_time_iterator_ = true;
}
return prefetch_time;
}
}
bool CostAnalysisPrefetchIntervalPicker::Done() const {
return increasing_prefetch_time_iterator_ > latest_prefetch_time_ &&
decreasing_prefetch_time_iterator_ < earliest_prefetch_time_;
}
int64_t CostAnalysisPrefetchIntervalPicker::latest_time() const {
return latest_prefetch_time_;
}
void CostAnalysisPrefetchIntervalPicker::SetRetryNumber(int retry_number) {
retry_number_ = retry_number;
}
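// Returns the smallest while-nesting level occurring in [start_time,
// end_time], following while_nest_level_change_ links between level changes
// instead of scanning every logical time.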
int CostAnalysisPrefetchIntervalPicker::GetMinWhileNestLevel(
int64_t start_time, int64_t end_time) const {
int min_nest_level =
std::min(while_nest_level_[start_time], while_nest_level_[end_time]);
int change_idx = while_nest_level_change_[end_time];
while (change_idx >= start_time) {
min_nest_level = std::min(min_nest_level, while_nest_level_[change_idx]);
change_idx = while_nest_level_change_[change_idx];
}
return min_nest_level;
}
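// Elapsed time between two logical times, taken from the cumulative-sum array
// and divided by the execution count of the smallest while nest enclosing the
// whole interval so nested regions are not over-weighted.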
float CostAnalysisPrefetchIntervalPicker::GetLogicalIntervalElapsed(
int64_t start_time, int64_t end_time) const {
CHECK_LE(start_time, end_time);
if (start_time == end_time) {
return 0.0;
}
if (start_time < 0) {
start_time = 0;
}
int interval_while_nest_level = GetMinWhileNestLevel(start_time, end_time);
return (elapsed_time_cumsum_[end_time - 1] -
elapsed_time_cumsum_[start_time]) /
while_execution_counts_[interval_while_nest_level];
}
std::string CostAnalysisPrefetchIntervalPicker::ToDebugString() const {
int current_logical_prefetch_time = using_increasing_prefetch_time_iterator_
? increasing_prefetch_time_iterator_
: decreasing_prefetch_time_iterator_;
float logical_interval_elapsed = GetLogicalIntervalElapsed(
current_logical_prefetch_time, end_logical_time_);
return absl::StrCat(
"Async copy elapsed (s) = ", async_copy_elapsed_,
", inst elapsed reduction (s) = ", inst_elapsed_reduction_,
", logical interval elapsed (s) = ", logical_interval_elapsed,
", interval = (", current_logical_prefetch_time, ", ", end_logical_time_,
")");
}
std::string CostAnalysisPrefetchIntervalPicker::ToNoCopyDebugString(
const Shape& shape, int64_t start_time, int64_t end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
float logical_interval_elapsed =
GetLogicalIntervalElapsed(start_time, end_time);
return absl::StrCat(
"Async copy elapsed (s) = ", async_copy_elapsed,
", logical interval elapsed (s) = ", logical_interval_elapsed);
}
std::optional<float>
CostAnalysisPrefetchIntervalPicker::BufferIntervalAlternateMemoryBenefit(
const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval)
const {
return cost_analysis_.GetMemoryBoundedness(interval);
}
}
} | #include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <cstdint>
#include <optional>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
using CostAnalysisPrefetchIntervalPickerTest = HloTestBase;
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrder) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
f = f32[2,4] negate(e)
g = f32[2,4] negate(f)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[2,4] negate(n)
p = f32[2,4] negate(o)
q = f32[2,4] negate(p)
r = f32[2,4] negate(q)
s = f32[2,4] negate(r)
t = f32[2,4] negate(s)
u = f32[2,4] negate(t)
ROOT v = f32[2,4] add(u, param0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloCostAnalysis hlo_cost_analysis(ShapeSize);
CostAnalysisOptions options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
CostAnalysisPrefetchIntervalPicker interval_picker(
*cost_analysis,
1.0,
2.0,
4.0,
32);
HloInstruction* root = module->entry_computation()->root_instruction();
const HloUse use{root, 1, {}};
interval_picker.Begin(use, 0, 22, std::nullopt);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 15);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 16);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 14);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 17);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 13);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 18);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 12);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 11);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 10);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 9);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_TRUE(interval_picker.Done());
interval_picker.Begin(use, 19, 22, std::nullopt);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_TRUE(interval_picker.Done());
}
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrderWhile) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition {
param1 = (f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body {
param2 = (f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
add = f32[2,4] add(gte2, gte2)
ROOT tuple2 = (f32[2,4]) tuple(add)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
f = f32[2,4] negate(e)
g = f32[2,4] negate(f)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[2,4] negate(n)
p = f32[2,4] negate(o)
q = f32[2,4] negate(p)
tuple = (f32[2,4]) tuple(q)
while = (f32[2,4]) while(tuple), condition=while_condition, body=while_body
gte1 = f32[2,4] get-tuple-element(while), index=0
r = f32[2,4] negate(gte1)
s = f32[2,4] negate(r)
t = f32[2,4] negate(s)
u = f32[2,4] negate(t)
ROOT v = f32[2,4] add(u, param0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloCostAnalysis hlo_cost_analysis(ShapeSize);
CostAnalysisOptions options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
CostAnalysisPrefetchIntervalPicker interval_picker(
*cost_analysis,
1.0,
2.0,
12.0,
32);
EXPECT_EQ(cost_analysis->GetWhileNestMultiplier(1), 5.0);
HloInstruction* root = module->entry_computation()->root_instruction();
const HloUse use{root, 1, {}};
interval_picker.Begin(use, 0, 31, std::nullopt);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 25);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 26);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 18);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 27);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_EQ(interval_picker.Next(), 17);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_TRUE(interval_picker.Done());
}
TEST_F(CostAnalysisPrefetchIntervalPickerTest, NestedWhile) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition.2 {
param1 = (f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body.2 {
param2 = (f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
add = f32[2,4] add(gte2, gte2)
ROOT tuple2 = (f32[2,4]) tuple(add)
}
while_condition.1 {
param3 = (f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body.1 {
param4 = (f32[2,4]) parameter(0)
gte1 = f32[2,4] get-tuple-element(param4), index=0
add1 = f32[2,4] add(gte1, gte1)
tuple1 = (f32[2,4]) tuple(add1)
while = (f32[2,4]) while(tuple1), condition=while_condition.2, body=while_body.2
gte2 = f32[2,4] get-tuple-element(while), index=0
add2 = f32[2,4] add(gte2, gte2)
ROOT tuple2 = (f32[2,4]) tuple(add2)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
tuple = (f32[2,4]) tuple(c)
while = (f32[2,4]) while(tuple), condition=while_condition.1, body=while_body.1
gte1 = f32[2,4] get-tuple-element(while), index=0
ROOT root = f32[2,4] add(gte1, param0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloCostAnalysis hlo_cost_analysis(ShapeSize);
CostAnalysisOptions options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
CostAnalysisPrefetchIntervalPicker interval_picker(
*cost_analysis,
1.0,
2.0,
12.0,
32);
HloInstruction* root = module->entry_computation()->root_instruction();
const HloUse use{root, 1, {}};
const Shape& shape = root->operand(1)->shape();
EXPECT_EQ(interval_picker.LatestPrefetchStartTime(shape, 0,
23, &use),
4);
}
TEST_F(CostAnalysisPrefetchIntervalPickerTest, ConsecutiveConditionals) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
true_computation.0 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation.0 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
true_computation.1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation.1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = f32[3]{0} parameter(1)
p2 = pred[] parameter(2)
tuple0 = (f32[3]{0}) tuple(p0)
tuple1 = (f32[3]{0}) tuple(p1)
conditional0 = f32[3]{0} conditional(p2, tuple0, tuple0), true_computation=true_computation.0, false_computation=false_computation.0
conditional1 = f32[3]{0} conditional(p2, tuple1, tuple1), true_computation=true_computation.1, false_computation=false_computation.1
ROOT tuple2 = (f32[3]{0}, f32[3]{0}) tuple(conditional0, conditional1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloCostAnalysis hlo_cost_analysis(ShapeSize);
CostAnalysisOptions options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
CostAnalysisPrefetchIntervalPicker interval_picker(
*cost_analysis,
1.0,
2.0,
12.0,
32);
LOG(INFO) << module->ToString();
HloInstruction* conditional1 =
module->entry_computation()->GetInstructionWithName("conditional1");
const HloUse use{conditional1, 1, {0}};
const Shape& shape =
module->entry_computation()->parameter_instruction(0)->shape();
EXPECT_LT(interval_picker.LatestPrefetchStartTime(shape, 0,
11, &use),
5);
}
TEST_F(CostAnalysisPrefetchIntervalPickerTest, EarliestLatestWindowTooSmall) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
negate = f32[2,4] negate(param0)
tanh = f32[2,4] tanh(param0)
ROOT add = f32[2,4] add(tanh, negate)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloCostAnalysis hlo_cost_analysis(ShapeSize);
CostAnalysisOptions options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
cost_analysis->SetOverrideForGetInstructionElapsed(
[](const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kTanh) {
return 20.0;
}
return 1.0;
});
CostAnalysisPrefetchIntervalPicker interval_picker(
*cost_analysis,
1.0,
2.0,
12.0,
32);
HloInstruction* root = module->entry_computation()->root_instruction();
const HloUse use{root, 1, {}};
interval_picker.Begin(use, 1, 3, std::nullopt);
LOG(INFO) << interval_picker.ToDebugString();
EXPECT_FALSE(interval_picker.Done());
EXPECT_EQ(interval_picker.Next(), 1);
EXPECT_TRUE(interval_picker.Done());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0587d70c-ea77-47c1-b1b0-a74f752d5522 | cpp | tensorflow/tensorflow | memory_bound_loop_optimizer | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer.cc | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer_test.cc | #include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace memory_space_assignment {
namespace {
std::optional<int64_t> GetInstructionIndex(
const HloInstruction* instruction,
const absl::flat_hash_map<const HloInstruction*, int64_t>&
instructions_to_index) {
auto it = instructions_to_index.find(instruction);
return it == instructions_to_index.end() ? std::nullopt
: std::optional<int64_t>(it->second);
}
}
void LoopOptimizerBestFitHeap::CreateBufferInterval(
const AllocationBlock& allocation_block,
const AllocationBlock* colocated_with) {
buffer_intervals_[&allocation_block] =
BufferInterval({&allocation_block,
allocation_block.size,
allocation_block.inclusive_start_time,
allocation_block.end_time,
{},
colocated_with == nullptr});
if (colocated_with) {
buffer_intervals_[colocated_with].colocations.push_back(&allocation_block);
}
}
std::optional<HeapSimulator::Chunk>
LoopOptimizerBestFitHeap::MaybeFindChunkCandidate(
const AllocationBlock& allocation_block, int64_t preferred_offset) {
Chunk chunk_candidate = FindChunkCandidate(
buffer_intervals_[&allocation_block], preferred_offset);
if (chunk_candidate.chunk_end() <= size_limit_per_heap_) {
return chunk_candidate;
}
return std::nullopt;
}
std::optional<HeapSimulator::Chunk>
LoopOptimizerBestFitHeap::FindAndCommitChunkCandidate(
const AllocationBlock& allocation_block, int64_t preferred_offset) {
std::optional<Chunk> chunk =
MaybeFindChunkCandidate(allocation_block, preferred_offset);
if (chunk.has_value()) {
CommitChunk(buffer_intervals_[&allocation_block], chunk.value());
}
return chunk;
}
void LoopOptimizerBestFitHeap::RemoveChunk(int64_t start_time, int64_t end_time,
Chunk chunk) {
CHECK(interval_tree_.Remove(start_time, end_time, chunk));
}
void LoopOptimizerBestFitHeap::RemoveEvenChunks(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
std::optional<HeapSimulator::Chunk>& chunk) {
RemoveChunk(begin_idx_in_loop, end_idx_in_loop, chunk.value());
RemoveChunk(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, chunk.value());
}
void LoopOptimizerBestFitHeap::RemoveOddChunks(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
std::optional<HeapSimulator::Chunk>& chunk) {
RemoveChunk(begin_idx_in_loop + loop_size_, end_idx_in_loop + loop_size_,
chunk.value());
RemoveChunk(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, chunk.value());
}
void LoopOptimizerBestFitHeap::RemoveEvenOddChunkPair(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
EvenOddChunkPair& chunks) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_chunk, odd_chunk] = chunks;
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
RemoveOddChunks(begin_idx_in_loop, end_idx_in_loop, odd_chunk);
}
const AllocationBlock& LoopOptimizerBestFitHeap::GetAllocationBlock(
int64_t start_time, int64_t end_time, int64_t size) {
allocation_blocks_.push_back(
{start_time, end_time, size, static_cast<int64_t>(-1),
static_cast<int64_t>(-1),
static_cast<int64_t>(allocation_blocks_.size())});
return allocation_blocks_.back();
}
const AllocationBlock& LoopOptimizerBestFitHeap::CreateEvenAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block =
GetAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
return first_allocation_block;
}
const AllocationBlock& LoopOptimizerBestFitHeap::CreateOddAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block = GetAllocationBlock(
begin_idx_in_loop + loop_size_, end_idx_in_loop + loop_size_, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
return first_allocation_block;
}
void LoopOptimizerBestFitHeap::CheckAllocationIntervalValid(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop) const {
CHECK_LE(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(-1 * loop_size_, begin_idx_in_loop);
CHECK_LT(begin_idx_in_loop, loop_size_);
CHECK_LE(0, end_idx_in_loop);
CHECK_LT(end_idx_in_loop, 2 * loop_size_);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, 2 * loop_size_);
}
void LoopOptimizerBestFitHeap::ShiftAllocationIntervalIfRequired(
int64_t& begin_idx_in_loop, int64_t& end_idx_in_loop) const {
if (begin_idx_in_loop < 0) {
begin_idx_in_loop += loop_size_;
end_idx_in_loop += loop_size_;
}
}
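// The heap models four consecutive loop iterations: even-iteration blocks
// occupy iterations 0 and 2, odd-iteration blocks iterations 1 and 3. This
// finds chunks for both parities without leaving anything committed (the even
// chunk is committed only temporarily while the odd chunk is searched).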
EvenOddChunkPair LoopOptimizerBestFitHeap::FindEvenAndOddAllocationBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
std::pair<int64_t, int64_t> preferred_offsets) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_offset, odd_offset] = preferred_offsets;
const AllocationBlock& even_allocation =
CreateEvenAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
const AllocationBlock& odd_allocation =
CreateOddAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> even_chunk =
FindAndCommitChunkCandidate(even_allocation, even_offset);
if (!even_chunk.has_value()) {
return {std::nullopt, std::nullopt};
}
std::optional<HeapSimulator::Chunk> odd_chunk =
MaybeFindChunkCandidate(odd_allocation, odd_offset);
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
if (odd_chunk.has_value()) {
return {even_chunk, odd_chunk};
}
return {std::nullopt, std::nullopt};
}
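// Same search as above, but the chunks are committed; if the odd chunk cannot
// be placed, the already-committed even chunk is rolled back.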
EvenOddChunkPair LoopOptimizerBestFitHeap::AllocateEvenAndOddBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
std::pair<int64_t, int64_t> preferred_offsets) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_offset, odd_offset] = preferred_offsets;
const AllocationBlock& even_allocation =
CreateEvenAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
const AllocationBlock& odd_allocation =
CreateOddAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> even_chunk =
FindAndCommitChunkCandidate(even_allocation, even_offset);
if (!even_chunk.has_value()) {
return {std::nullopt, std::nullopt};
}
std::optional<HeapSimulator::Chunk> odd_chunk =
FindAndCommitChunkCandidate(odd_allocation, odd_offset);
if (odd_chunk.has_value()) {
return {even_chunk, odd_chunk};
}
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
return {std::nullopt, std::nullopt};
}
const AllocationBlock&
LoopOptimizerBestFitHeap::CreateSameEvenAndOddAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block =
GetAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 1 * loop_size_,
end_idx_in_loop + 1 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
const AllocationBlock& third_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, size);
CreateBufferInterval(third_allocation_block, &first_allocation_block);
const AllocationBlock& fourth_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, size);
CreateBufferInterval(fourth_allocation_block, &first_allocation_block);
return first_allocation_block;
}
EvenOddChunkPair LoopOptimizerBestFitHeap::FindSameEvenAndOddAllocationBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
int64_t preferred_offset) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, loop_size_);
const AllocationBlock& allocation = CreateSameEvenAndOddAllocationBlock(
begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> chunk =
MaybeFindChunkCandidate(allocation, preferred_offset);
return {chunk, chunk};
}
EvenOddChunkPair LoopOptimizerBestFitHeap::AllocateSameEvenAndOddBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
int64_t preferred_offset) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, loop_size_);
const AllocationBlock& allocation = CreateSameEvenAndOddAllocationBlock(
begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> chunk =
FindAndCommitChunkCandidate(allocation, preferred_offset);
return {chunk, chunk};
}
std::string LoopOptimizerBestFitHeap::MemoryUsageToAsciiArt(
int64_t begin_iteration, int64_t end_iteration) const {
CHECK_LE(0, begin_iteration);
CHECK_LE(begin_iteration, end_iteration);
return interval_tree_.NodesOverlappingInTimeToAsciiArt(
loop_size_ * begin_iteration, loop_size_ * (end_iteration + 1) - 1,
loop_size_);
}
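// Remaining memory at each loop position, measured on the third of the four
// modeled iterations.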
std::vector<int64_t> LoopOptimizerBestFitHeap::RemainingMemoryByTime() const {
std::vector<int64_t> memory_used_by_time =
interval_tree_.MemoryUsedInInterval(loop_size_ * 2, loop_size_ * 3 - 1);
std::vector<int64_t> remaining_memory_by_time(loop_size_);
for (int i = 0; i < loop_size_; ++i) {
remaining_memory_by_time[i] = size_limit_per_heap_ - memory_used_by_time[i];
}
return remaining_memory_by_time;
}
int64_t LoopOptimizerBestFitHeap::LastMemoryOffsetOccupied() const {
return interval_tree_.HeapSizeInInterval(loop_size_ * 2, loop_size_ * 4 - 1);
}
absl::StatusOr<std::unique_ptr<MemoryBoundLoopOptimizer>>
MemoryBoundLoopOptimizer::Create(
int loop_start, int loop_end, uint64_t alternate_memory_size,
const MemoryBoundLoopOptimizerOptions& options,
const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
const CostAnalysis& cost_analysis,
const BufferValue::SizeFunction& size_function,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn) {
std::unique_ptr<MemoryBoundLoopOptimizer> optimizer =
absl::WrapUnique(new MemoryBoundLoopOptimizer(
loop_start, loop_end, alternate_memory_size, options, hlo_live_range,
alias_analysis, cost_analysis, size_function,
reserved_scoped_memory_fn));
TF_RETURN_IF_ERROR(optimizer->Initialize());
return std::move(optimizer);
}
MemoryBoundLoopOptimizer::MemoryBoundLoopOptimizer(
int loop_start, int loop_end, uint64_t alternate_memory_size,
const MemoryBoundLoopOptimizerOptions& options,
const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
const CostAnalysis& cost_analysis,
const BufferValue::SizeFunction& size_function,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn)
: loop_start_(loop_start),
loop_end_(loop_end),
loop_size_(loop_end - loop_start),
alternate_memory_size_(alternate_memory_size),
options_(options),
hlo_live_range_(hlo_live_range),
alias_analysis_(alias_analysis),
cost_analysis_(cost_analysis),
size_function_(size_function),
reserved_scoped_memory_fn_(reserved_scoped_memory_fn) {}
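// Indexes the instructions of the current, previous, and next loop
// iterations, initializes per-position remaining memory (alternate memory
// size minus reserved scoped memory), and gathers every aliased buffer
// touched by the loop so loop values can be created for them.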
absl::Status MemoryBoundLoopOptimizer::Initialize() {
const auto& instruction_sequence =
hlo_live_range_.flattened_instruction_sequence().instructions();
VLOG(3) << "MemoryBoundLoopOptimizer::Initialize, loop start: " << loop_start_
<< ", loop end: " << loop_end_ << ", loop size: " << loop_size_;
const HloComputation* loop_computation = nullptr;
int prev_iteration_start = loop_start_ - loop_size_;
int next_iteration_start = loop_start_ + loop_size_;
for (int i = 0; i < loop_size_; ++i) {
const HloInstruction* loop_inst = instruction_sequence[loop_start_ + i];
instructions_in_loop_[loop_inst] = i;
const HloInstruction* prev_iteration_inst =
instruction_sequence[prev_iteration_start + i];
instructions_in_prev_iteration_[prev_iteration_inst] = i;
const HloInstruction* next_iteration_inst =
instruction_sequence[next_iteration_start + i];
instructions_in_next_iteration_[next_iteration_inst] = i;
VLOG(3) << " inst in loop [" << (i) << "]: " << loop_inst->name();
if (!loop_computation) {
loop_computation = loop_inst->parent();
} else {
TF_RET_CHECK(loop_computation == loop_inst->parent());
}
remaining_memory_.push_back(
alternate_memory_size_ -
reserved_scoped_memory_fn_(loop_inst,
{},
{}));
}
std::set<const HloBuffer*> buffers_to_process;
for (const auto& [instruction, idx] : instructions_in_loop_) {
auto maybe_add_buffer = [&](const HloInstruction* instruction) {
return [this, &buffers_to_process, instruction](const Shape& subshape,
const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
const HloBuffer& buffer =
alias_analysis_.GetUniqueBufferAt(instruction, index);
if (buffers_to_process.find(&buffer) == buffers_to_process.end()) {
buffers_to_process.insert(&buffer);
}
};
};
ShapeUtil::ForEachSubshape(instruction->shape(),
maybe_add_buffer(instruction));
for (const HloInstruction* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(operand->shape(), maybe_add_buffer(operand));
}
}
for (const HloBuffer* buffer : buffers_to_process) {
MaybeCreateLoopValue(*buffer, loop_computation);
}
return absl::OkStatus();
}
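// Builds a LoopValue for the buffer: classifies its positions and uses into
// current/previous/next iterations, derives the allocation type (temporary,
// pinned, prefetch, loop-carried dependence, or unsupported), and estimates
// the bytes saved by serving it from alternate memory. Buffers defined in a
// previous iteration, or not touched by the loop at all, are dropped again.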
void MemoryBoundLoopOptimizer::MaybeCreateLoopValue(
const HloBuffer& buffer, const HloComputation* loop_computation) {
loop_values_.push_back({});
LoopValue& loop_value = loop_values_.back();
float pos_bytes = 0;
float use_bytes = 0;
bool has_footer_consumer = false;
for (const HloValue* value : buffer.values()) {
for (const HloPosition& position : value->positions()) {
if (position.instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
std::optional<int64_t> loop_index =
GetInstructionIndex(position.instruction, instructions_in_loop_);
std::optional<int64_t> prev_iteration_index;
if (loop_index) {
loop_value.loop_positions.push_back({*loop_index, position});
VLOG(3) << "Pos match: " << position.instruction->name() << " at "
<< *loop_index;
} else if ((prev_iteration_index = GetInstructionIndex(
position.instruction, instructions_in_prev_iteration_))) {
loop_value.prev_iteration_positions.push_back(
{*prev_iteration_index, position});
VLOG(3) << "Pos match (prev iteration): "
<< position.instruction->name() << " at "
<< *prev_iteration_index;
} else if (loop_value.prev_iteration_positions.empty() &&
loop_value.loop_positions.empty() &&
position.instruction->parent() == loop_computation &&
!loop_value.header_position) {
loop_value.header_position = position;
}
if (loop_index || prev_iteration_index) {
float bytes_accessed = cost_analysis_.base_costs().OutputBytesAccessed(
*position.instruction, position.index);
pos_bytes += bytes_accessed;
VLOG(3) << " accessed: " << bytes_accessed;
}
}
for (const HloUse& use : value->GetUses()) {
if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
std::optional<int64_t> loop_index =
GetInstructionIndex(use.instruction, instructions_in_loop_);
std::optional<int64_t> next_iteration_index;
if (loop_index) {
loop_value.loop_uses.push_back({*loop_index, use});
VLOG(3) << "Use match: " << use.instruction->name() << " at "
<< *loop_index;
} else if ((next_iteration_index = GetInstructionIndex(
use.instruction, instructions_in_next_iteration_))) {
loop_value.next_iteration_uses.push_back({*next_iteration_index, use});
VLOG(3) << "Use match (next iteration): " << use.instruction->name()
<< " at " << *next_iteration_index;
} else if (!loop_value.loop_positions.empty() ||
!loop_value.loop_uses.empty()) {
has_footer_consumer = true;
}
if (loop_index || next_iteration_index) {
float bytes_accessed = cost_analysis_.base_costs().OperandBytesAccessed(
*use.instruction, use.operand_number, use.operand_index);
use_bytes += bytes_accessed;
VLOG(3) << " accessed: " << bytes_accessed;
}
}
}
if ((!loop_value.loop_positions.empty() || !loop_value.loop_uses.empty()) &&
loop_value.prev_iteration_positions.empty()) {
loop_value.size = size_function_(**buffer.values().begin());
VLOG(3) << "Size: " << loop_value.size;
loop_value.allocation_type = LoopValue::AllocationType::kUnsupported;
auto position_compare = [](const std::pair<int64_t, HloPosition>& a,
const std::pair<int64_t, HloPosition>& b) {
return a.first < b.first;
};
auto use_compare = [](const std::pair<int64_t, HloUse>& a,
const std::pair<int64_t, HloUse>& b) {
return a.first < b.first;
};
absl::c_sort(loop_value.loop_positions, position_compare);
absl::c_sort(loop_value.prev_iteration_positions, position_compare);
absl::c_sort(loop_value.loop_uses, use_compare);
absl::c_sort(loop_value.next_iteration_uses, use_compare);
if (!loop_value.loop_positions.empty()) {
if (loop_value.next_iteration_uses.empty() &&
!loop_value.loop_uses.empty()) {
loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
} else if (!loop_value.next_iteration_uses.empty()) {
if (loop_value.next_iteration_uses.back().first >=
loop_value.loop_positions.front().first) {
loop_value.allocation_type =
LoopValue::AllocationType::kLoopCarriedDependence;
} else {
loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
}
}
} else if (loop_value.header_position && !loop_value.loop_uses.empty()) {
if (loop_value.loop_uses.size() ==
loop_value.next_iteration_uses.size() &&
loop_value.loop_uses.front().first ==
loop_value.next_iteration_uses.front().first) {
loop_value.allocation_type = LoopValue::AllocationType::kPinned;
} else if (loop_value.next_iteration_uses.empty() ||
loop_value.next_iteration_uses.back().first <
loop_value.loop_uses.front().first) {
loop_value.allocation_type = LoopValue::AllocationType::kPrefetch;
}
}
VLOG(3) << "Allocation type "
<< LoopValue::AllocationTypeToString(loop_value.allocation_type);
VLOG(3) << "Pos bytes: " << pos_bytes << " use bytes: " << use_bytes;
float savings = pos_bytes + use_bytes;
if (loop_value.header_position) {
savings -= loop_value.size;
}
if (!loop_value.loop_positions.empty() && has_footer_consumer) {
savings -= loop_value.size;
}
loop_value.savings = savings;
loop_value.savings_per_byte = savings / loop_value.size;
VLOG(3) << "Savings: " << loop_value.savings;
VLOG(3) << "Savings per byte: " << loop_value.savings_per_byte;
for (const HloValue* value : buffer.values()) {
VLOG(3) << value->ToString();
}
loop_value.hlo_values = buffer.values();
} else {
loop_values_.pop_back();
}
}
void MemoryBoundLoopOptimizer::Optimize() {
SortLoopValues();
AllocateLoopValues();
PostProcess();
}
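// Estimates the loop's steady-state execution time by simulating three
// iterations: each prefetch consumes bandwidth-idle time between its start
// and required completion point, and whatever copy time cannot be hidden is
// added to the critical path.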
float MemoryBoundLoopOptimizer::CalculateExecutionTime() const {
std::vector<std::pair<const CopyAllocation*, float>> prefetches;
for (const LoopValue& value : loop_values_) {
if (!value.allocations.empty() &&
value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(
{static_cast<const CopyAllocation*>(value.allocations.back().get()),
cost_analysis_.GetAsyncCopyElapsed(
value.hlo_values.front()->shape())});
}
}
auto get_effective_done_time =
[&](int64_t copy_start_schedule_after,
int64_t copy_done_schedule_before) -> int64_t {
if (copy_start_schedule_after == loop_size_ - 1 &&
copy_done_schedule_before == 0) {
return 2 * loop_size_;
}
if (copy_start_schedule_after + 1 >= copy_done_schedule_before) {
return copy_done_schedule_before + loop_size_;
}
return copy_done_schedule_before;
};
absl::c_sort(
prefetches, [&](const std::pair<const CopyAllocation*, float>& a,
const std::pair<const CopyAllocation*, float>& b) {
return std::forward_as_tuple(
a.first->copy_start_schedule_after(),
get_effective_done_time(
a.first->copy_start_schedule_after(),
a.first->copy_done_schedule_before())) <
std::forward_as_tuple(b.first->copy_start_schedule_after(),
get_effective_done_time(
b.first->copy_start_schedule_after(),
b.first->copy_done_schedule_before()));
});
std::vector<std::optional<int>> required_prefetch_completions(loop_size_);
for (int i = 0; i < prefetches.size(); ++i) {
const auto& [prefetch, elapsed] = prefetches[i];
int required_prefetch_completion = i;
if (prefetch->copy_start_schedule_after() == loop_size_ - 1 &&
prefetch->copy_done_schedule_before() == 0) {
required_prefetch_completion -= 2 * prefetches.size();
} else if (prefetch->copy_start_schedule_after() + 1 >=
prefetch->copy_done_schedule_before()) {
required_prefetch_completion -= prefetches.size();
}
VLOG(3) << "Prefetch #" << i << " (elapsed " << elapsed
<< "): " << prefetch->ToString();
if (required_prefetch_completions[prefetch->copy_done_schedule_before()]) {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
std::max(
*required_prefetch_completions[prefetch
->copy_done_schedule_before()],
required_prefetch_completion);
} else {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
required_prefetch_completion;
}
VLOG(4)
<< "Required completion at " << prefetch->copy_done_schedule_before()
<< " = "
<< *required_prefetch_completions[prefetch
->copy_done_schedule_before()];
}
float result;
std::vector<float> bandwidth_idle_times;
std::vector<float> instructions_elapsed;
bandwidth_idle_times.reserve(loop_size_);
instructions_elapsed.reserve(loop_size_);
for (int i = 0; i < loop_size_; ++i) {
bandwidth_idle_times.push_back(GetBandwidthIdleTime(i));
instructions_elapsed.push_back(GetInstructionElapsed(i));
}
const int kNumIterations = 3;
std::vector<float> prefetch_remaining_elapsed_times(prefetches.size() *
kNumIterations);
int prefetch_start_index = 0;
int prefetch_done_index = 0;
int prefetch_completed_index = 0;
for (int iteration = 0; iteration < kNumIterations; ++iteration) {
float total_elapsed = 0;
float total_bandwidth_idle_time = 0;
float total_critical_prefetch = 0;
for (int i = 0; i < loop_size_; ++i) {
std::optional<int> required_prefetch_completion =
required_prefetch_completions[i];
if (required_prefetch_completion) {
int required_prefetch_done_index =
iteration * static_cast<int>(prefetches.size()) +
*required_prefetch_completion;
VLOG(4) << "Prefetch #"
<< ((*required_prefetch_completion + prefetches.size()) %
prefetches.size())
<< " (" << required_prefetch_done_index
<< ") is required to be completed at " << i;
for (; prefetch_done_index <= required_prefetch_done_index;
++prefetch_done_index) {
CHECK_LE(prefetch_done_index, prefetch_start_index);
if (prefetch_done_index == prefetch_completed_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_done_index];
VLOG(4) << "Prefetch #" << (prefetch_done_index % prefetches.size())
<< " (" << prefetch_done_index
<< ") did not complete, remaining elapsed = "
<< prefetch_remaining;
total_critical_prefetch += prefetch_remaining;
prefetch_remaining = 0;
++prefetch_completed_index;
}
}
}
float elapsed = instructions_elapsed[i];
total_elapsed += elapsed;
float bandwidth_idle_time = bandwidth_idle_times[i];
for (; prefetch_completed_index < prefetch_start_index;
++prefetch_completed_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_completed_index];
if (bandwidth_idle_time < prefetch_remaining) {
prefetch_remaining -= bandwidth_idle_time;
bandwidth_idle_time = 0;
VLOG(4) << "Prefetch #"
<< (prefetch_completed_index % prefetches.size()) << " ("
<< prefetch_completed_index << ") still ongoing at " << i
<< ", remaining elapsed = " << prefetch_remaining;
break;
}
bandwidth_idle_time -= prefetch_remaining;
prefetch_remaining = 0;
VLOG(4) << "Prefetch #"
<< (prefetch_completed_index % prefetches.size()) << " ("
<< prefetch_completed_index << ") completed at " << i
<< ", bandwidth idle time = " << bandwidth_idle_time;
}
if (bandwidth_idle_time > 0) {
VLOG(4) << "Bandwidth idle time at " << i << " = "
<< bandwidth_idle_time;
total_bandwidth_idle_time += bandwidth_idle_time;
}
for (; prefetch_start_index < (iteration + 1) * prefetches.size() &&
prefetches[prefetch_start_index % prefetches.size()]
.first->copy_start_schedule_after() == i;
++prefetch_start_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_start_index];
prefetch_remaining =
prefetches[prefetch_start_index % prefetches.size()].second;
VLOG(4) << "Prefetch #" << (prefetch_start_index % prefetches.size())
<< " (" << prefetch_start_index << ") started at " << i
<< ", remaining elapsed = " << prefetch_remaining;
}
}
VLOG(3) << "Iteration " << iteration;
VLOG(3) << "Total elapsed: " << total_elapsed
<< ", total critical prefetch: " << total_critical_prefetch
<< ", total bandwidth idle time: " << total_bandwidth_idle_time;
result = total_elapsed + total_critical_prefetch;
}
return result;
}
std::string
MemoryBoundLoopOptimizer::LoopValue::AllocationTypeToString(
LoopValue::AllocationType allocation_type) {
switch (allocation_type) {
case AllocationType::kTemporary:
return "temporary";
case AllocationType::kLoopCarriedDependence:
return "loop-carried dependence";
case AllocationType::kPinned:
return "pinned";
case AllocationType::kPrefetch:
return "prefetch";
default:
CHECK(allocation_type == AllocationType::kUnsupported);
return "unsupported";
}
}
std::string MemoryBoundLoopOptimizer::LoopValue::ToString() const {
std::string values_str;
absl::StrAppend(&values_str, "Values:");
for (const HloValue* hlo_value : hlo_values) {
absl::StrAppend(&values_str, "\n - ", hlo_value->ToShortString());
}
std::string allocations_str;
if (!allocations.empty()) {
absl::StrAppend(&allocations_str, "Allocations:");
}
for (const auto& allocation : allocations) {
absl::StrAppend(&allocations_str, "\n - ", allocation->ToString());
}
return absl::StrCat(
"Size: ", size, " savings: ", savings,
" savings per byte: ", savings_per_byte,
" allocation type: ", AllocationTypeToString(allocation_type), "\n",
values_str, "\n", allocations_str);
}
bool MemoryBoundLoopOptimizer::LoopValue::IsAllocationTypeSupported() const {
return allocation_type == AllocationType::kTemporary ||
allocation_type == AllocationType::kPinned ||
allocation_type == AllocationType::kPrefetch;
}
void MemoryBoundLoopOptimizer::SortLoopValues() {
absl::c_stable_sort(loop_values_, [](const LoopValue& a, const LoopValue& b) {
return a.savings_per_byte > b.savings_per_byte;
});
}
void MemoryBoundLoopOptimizer::AllocateLoopValues() {
std::vector<LoopValue*> prefetch_values;
VLOG(3) << "Pre optimization execution time: " << CalculateExecutionTime();
for (LoopValue& value : loop_values_) {
switch (value.allocation_type) {
case LoopValue::AllocationType::kTemporary:
AllocateTemporary(value);
break;
case LoopValue::AllocationType::kPinned:
if (value.savings > 0) {
AllocatePinned(value);
}
break;
case LoopValue::AllocationType::kPrefetch:
prefetch_values.push_back(&value);
break;
case LoopValue::AllocationType::kLoopCarriedDependence:
case LoopValue::AllocationType::kUnsupported:
VLOG(1) << "Unsupported allocation: " << value.ToString();
}
}
VLOG(3) << "Execution time after allocating temporaries: "
<< CalculateExecutionTime();
AllocatePrefetches(absl::MakeSpan(prefetch_values));
VLOG(3) << "Execution time after allocating prefetches: "
<< CalculateExecutionTime();
}
void MemoryBoundLoopOptimizer::PostProcess() {
for (LoopValue& value : loop_values_) {
absl::flat_hash_set<HloUse> allocated_uses;
for (const auto& allocation : value.allocations) {
for (const HloUse& use : allocation->uses()) {
allocated_uses.insert(use);
}
}
std::vector<HloUse> unallocated_uses;
absl::flat_hash_set<int> use_indices;
for (const auto& [idx, use] : value.loop_uses) {
use_indices.insert(idx);
if (!allocated_uses.contains(use)) {
unallocated_uses.push_back(use);
}
}
for (const auto& [next_iteration_idx, use] : value.next_iteration_uses) {
if (use_indices.contains(next_iteration_idx)) {
continue;
}
HloInstruction* loop_instruction =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + next_iteration_idx);
HloUse loop_use{loop_instruction, use.operand_number, use.operand_index};
if (!allocated_uses.contains(loop_use)) {
unallocated_uses.push_back(loop_use);
}
}
if (!unallocated_uses.empty()) {
value.allocations.push_back(std::make_unique<PinnedAllocation>(
value.hlo_values.front()->defining_position(), MemorySpace::kDefault,
std::nullopt, 0, loop_size_, false));
for (const HloUse& use : unallocated_uses) {
value.allocations.back()->AddUse(use);
}
}
}
}
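// Reserves `size` bytes at every loop position in [begin_idx, end_idx],
// wrapping around the loop boundary when end_idx < begin_idx; returns false
// without modifying the budget if any position lacks capacity.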
bool MemoryBoundLoopOptimizer::AllocateBetween(int64_t begin_idx,
int64_t end_idx, int64_t size) {
int64_t end_idx_sentinel = end_idx;
if (end_idx < begin_idx) {
end_idx_sentinel += loop_size_;
}
for (int64_t i = begin_idx; i <= end_idx_sentinel; ++i) {
if (remaining_memory_[i % loop_size_] < size) {
return false;
}
}
for (int64_t i = begin_idx; i <= end_idx_sentinel; ++i) {
remaining_memory_[i % loop_size_] -= size;
}
return true;
}
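// Temporary values live from their definition to their last use (possibly in
// the next iteration); on success, a pinned alternate-memory allocation
// covering that span is recorded.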
bool MemoryBoundLoopOptimizer::AllocateTemporary(LoopValue& value) {
VLOG(3) << "AllocateTemporary: " << value.ToString();
if (value.hlo_values.size() > 1) {
VLOG(3) << "LoopValue has more than one hlo value associated.";
return false;
}
int64_t definition_idx = value.loop_positions.front().first;
int64_t max_use_idx;
if (!value.next_iteration_uses.empty()) {
max_use_idx = value.next_iteration_uses.back().first;
CHECK_LT(max_use_idx, definition_idx);
} else {
max_use_idx = value.loop_uses.back().first;
}
bool success = AllocateBetween(definition_idx, max_use_idx, value.size);
if (success) {
VLOG(3) << "Pos: " << value.loop_positions[0].second;
value.allocations.push_back(std::make_unique<PinnedAllocation>(
value.loop_positions[0].second, MemorySpace::kAlternate, std::nullopt,
definition_idx, max_use_idx,
false));
AddAllLoopPositionsAndUses(value, true);
}
return success;
}
bool MemoryBoundLoopOptimizer::AllocatePinned(LoopValue& value) {
bool success = AllocateBetween(0, loop_size_ - 1, value.size);
if (success) {
CHECK(value.header_position);
value.allocations.push_back(std::make_unique<PinnedAllocation>(
*value.header_position, MemorySpace::kAlternate, std::nullopt, 0,
loop_size_,
false));
AddAllLoopPositionsAndUses(value, false);
}
return success;
}
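// Allocates prefetch candidates in decreasing order of first-use time,
// tracking per-position bandwidth-idle time and additional memory so each
// prefetch is scheduled against the budget left by earlier decisions.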
bool MemoryBoundLoopOptimizer::AllocatePrefetches(
absl::Span<LoopValue*> values) {
VLOG(3) << "Allocating prefetches num values: " << values.size();
AllocatePrefetchesContext context;
context.values = values;
context.value_indices.resize(values.size());
absl::c_iota(context.value_indices, 0);
absl::c_stable_sort(context.value_indices, [&](int a, int b) {
return std::forward_as_tuple(
values[a]->loop_uses.begin()->first,
values[a]->loop_uses.begin()->second.operand_number) >
std::forward_as_tuple(
values[b]->loop_uses.begin()->first,
values[b]->loop_uses.begin()->second.operand_number);
});
absl::flat_hash_map<const HloInstruction*,
std::vector<std::pair<int64_t, ShapeIndex>>>
additional_uses_in_alternate_mem;
absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>
additional_positions_in_alternate_mem;
for (const LoopValue* value : values) {
VLOG(3) << " prefetch value: " << value->ToString();
for (const auto& [idx, use] : value->loop_uses) {
additional_uses_in_alternate_mem[use.instruction].push_back(
{use.operand_number, use.operand_index});
}
for (const auto& [idx, position] : value->loop_positions) {
additional_positions_in_alternate_mem[position.instruction].push_back(
position.index);
}
}
for (int i = 0; i < loop_size_; ++i) {
context.bandwidth_idle_times.push_back(
GetBandwidthIdleTime(i, additional_uses_in_alternate_mem,
additional_positions_in_alternate_mem));
VLOG(3) << "Remaining bandwidth at " << i << " = "
<< *context.bandwidth_idle_times.rbegin();
}
context.additional_memory_used.resize(loop_size_, 0);
for (int value_index : context.value_indices) {
AllocatePrefetch(value_index, context);
}
for (int i = 0; i < loop_size_; ++i) {
remaining_memory_[i] -= context.additional_memory_used[i];
VLOG(3) << "Additional memory [" << i
<< "]: " << context.additional_memory_used[i];
VLOG(3) << "Remaining memory [" << i << "]: " << remaining_memory_[i];
VLOG(3) << "Remaining bandwidth [" << i
<< "] : " << context.bandwidth_idle_times[i];
}
return true;
}
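// Schedules one prefetch: walks backward from the first use accumulating
// bandwidth-idle time until the copy is covered (or at least
// desired_copy_ratio of it), while respecting per-position memory limits and
// the extra memory of prefetches that would be forced to start earlier; the
// winning position becomes the copy start time.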
bool MemoryBoundLoopOptimizer::AllocatePrefetch(
int value_index, AllocatePrefetchesContext& context) {
LoopValue* value = context.values.at(value_index);
VLOG(3) << "Allocating value: " << value->ToString();
int first_use_idx = value->loop_uses.front().first;
int last_use_idx = value->loop_uses.back().first;
int last_use_idx_sentinel = last_use_idx;
if (!value->next_iteration_uses.empty()) {
last_use_idx = value->next_iteration_uses.back().first;
last_use_idx_sentinel = last_use_idx + loop_size_;
CHECK_LT(last_use_idx, first_use_idx);
}
bool out_of_memory = false;
for (int i = first_use_idx; i <= last_use_idx_sentinel; ++i) {
int loop_idx = i % loop_size_;
if (context.additional_memory_used[loop_idx] + value->size >
remaining_memory_[loop_idx]) {
VLOG(3) << "Ran out of memory allocating for uses.";
out_of_memory = true;
}
}
if (out_of_memory) {
return false;
}
float copy_resource =
cost_analysis_.GetAsyncCopyElapsed(value->hlo_values.front()->shape());
VLOG(3) << "First use: " << value->loop_uses.begin()->second
<< " use idx: " << first_use_idx
<< " copy resource: " << copy_resource;
std::optional<int> copy_start_time;
float accumulated_copy_resource = 0;
std::vector<int> early_forced_prefetch_value_indices;
int early_forced_prefetch_value_search_index = 0;
float early_forced_prefetch_additional_memory = 0;
for (int i = first_use_idx - 1; i >= last_use_idx_sentinel - loop_size_;
--i) {
int loop_idx = (i + loop_size_) % loop_size_;
if (i < 0) {
for (; context.value_indices[early_forced_prefetch_value_search_index] !=
value_index;
++early_forced_prefetch_value_search_index) {
VLOG(3) << "Searching for early forced: "
<< early_forced_prefetch_value_search_index;
LoopValue* early_forced_value = context.values.at(
context.value_indices[early_forced_prefetch_value_search_index]);
if (early_forced_value->allocations.empty()) {
continue;
}
const CopyAllocation* early_forced_prefetch =
static_cast<const CopyAllocation*>(
early_forced_value->allocations.back().get());
VLOG(3) << "Prefetch: " << early_forced_prefetch->ToString();
if (early_forced_prefetch->copy_done_schedule_before() <=
early_forced_prefetch->copy_start_schedule_after() + 1 ||
(early_forced_prefetch->copy_start_schedule_after() ==
loop_size_ - 1 &&
early_forced_prefetch->copy_done_schedule_before() == 0)) {
break;
}
if (early_forced_prefetch->copy_start_schedule_after() != loop_idx) {
break;
}
early_forced_prefetch_value_indices.push_back(
early_forced_prefetch_value_search_index);
early_forced_prefetch_additional_memory += early_forced_value->size;
VLOG(3) << "Found early-forced prefetch value: "
<< early_forced_value->ToString();
VLOG(3) << "Early forced prefetch additional memory: "
<< early_forced_prefetch_additional_memory;
}
}
int64_t overlap_memory_overhead = 0;
if (loop_idx == last_use_idx) {
overlap_memory_overhead = value->size;
VLOG(3) << "Loop idx == last use idx (" << loop_idx
<< "), overlap memory overhead = " << overlap_memory_overhead;
}
if (context.additional_memory_used[loop_idx] + value->size +
overlap_memory_overhead + early_forced_prefetch_additional_memory >
remaining_memory_[loop_idx]) {
VLOG(3) << "Ran out of memory. Accumulated copy resource "
<< accumulated_copy_resource << " out of " << copy_resource
<< " at " << loop_idx;
break;
}
float bandwidth_idle_time = context.bandwidth_idle_times[loop_idx];
VLOG(3) << "Idx " << loop_idx
<< " bandwidth_idle_time: " << bandwidth_idle_time
<< " copy resource remaining: "
<< (copy_resource - accumulated_copy_resource) << " diff: "
<< (bandwidth_idle_time -
(copy_resource - accumulated_copy_resource));
if (bandwidth_idle_time >= copy_resource - accumulated_copy_resource) {
accumulated_copy_resource = copy_resource;
copy_start_time = loop_idx;
VLOG(3) << "Found the complete copy ratio and updated accumulated copy "
"resource: "
<< accumulated_copy_resource;
break;
} else if (!copy_start_time &&
accumulated_copy_resource + bandwidth_idle_time >=
copy_resource * options_.desired_copy_ratio()) {
accumulated_copy_resource += bandwidth_idle_time;
copy_start_time = loop_idx;
VLOG(3) << "Found the desired copy ratio and updated accumulated copy "
"resource: "
<< accumulated_copy_resource;
} else if (options_.allow_unsatisfied_fully_pipelined_prefetch() &&
loop_idx == last_use_idx) {
accumulated_copy_resource += bandwidth_idle_time;
copy_start_time = loop_idx;
VLOG(3) << "Could not reach the desired copy ratio but scheduling "
"fully pipelined prefetch anyway: "
<< accumulated_copy_resource;
break;
} else {
accumulated_copy_resource += bandwidth_idle_time;
VLOG(3) << "Updated accumulated copy resource: "
<< accumulated_copy_resource;
}
}
if (!copy_start_time) {
return false;
}
VLOG(3) << "Success: copy_start_time: " << *copy_start_time
<< " leftover copy resource: "
<< (copy_resource - accumulated_copy_resource);
auto update_additional_memory_used = [&](int loop_idx, int64_t addition) {
VLOG(4) << "Updating additional memory used at " << loop_idx << ". "
<< context.additional_memory_used[loop_idx] << " + " << addition
<< " => " << (context.additional_memory_used[loop_idx] + addition)
<< " (remaining: " << remaining_memory_[loop_idx] << ")";
context.additional_memory_used[loop_idx] += addition;
CHECK_LE(context.additional_memory_used[loop_idx],
remaining_memory_[loop_idx]);
};
for (int i = first_use_idx; i <= last_use_idx_sentinel; ++i) {
int loop_idx = i % loop_size_;
update_additional_memory_used(loop_idx, value->size);
}
accumulated_copy_resource = 0.0;
for (int i = first_use_idx - 1; i >= last_use_idx_sentinel - loop_size_;
--i) {
int loop_idx = (i + loop_size_) % loop_size_;
float& bandwidth_idle_time = context.bandwidth_idle_times[loop_idx];
int64_t overlap_memory_overhead = 0;
update_additional_memory_used(loop_idx,
value->size + overlap_memory_overhead);
if (bandwidth_idle_time < copy_resource - accumulated_copy_resource) {
accumulated_copy_resource += bandwidth_idle_time;
bandwidth_idle_time = 0;
if (loop_idx == *copy_start_time) {
VLOG(3) << "Remaining copy resource: "
<< (copy_resource - accumulated_copy_resource);
break;
}
} else {
bandwidth_idle_time -= copy_resource - accumulated_copy_resource;
CHECK_EQ(loop_idx, *copy_start_time);
break;
}
}
CHECK(value->header_position);
value->allocations.push_back(std::make_unique<PinnedAllocation>(
*value->header_position, MemorySpace::kDefault, std::nullopt, 0,
loop_size_, false));
value->allocations.push_back(std::make_unique<CopyAllocation>(
*value->allocations.back(), MemorySpace::kAlternate, std::nullopt,
((*copy_start_time - 1) + loop_size_) % loop_size_, first_use_idx,
last_use_idx_sentinel));
AddAllLoopPositionsAndUses(*value, true);
for (int early_forced_prefetch_value_index :
early_forced_prefetch_value_indices) {
LoopValue* early_forced_value = context.values.at(
context.value_indices[early_forced_prefetch_value_index]);
CHECK(!early_forced_value->allocations.empty());
CopyAllocation* early_forced_prefetch = static_cast<CopyAllocation*>(
early_forced_value->allocations.back().get());
for (int index = early_forced_prefetch->copy_start_schedule_after();
index >= *copy_start_time; --index) {
update_additional_memory_used(index, early_forced_value->size);
VLOG(3) << "Additional memory used: " << index << " "
<< context.additional_memory_used[index];
}
early_forced_prefetch->set_copy_start_schedule_after(
((*copy_start_time - 1) + loop_size_) % loop_size_);
VLOG(3) << "Updated prefetch: " << early_forced_prefetch->ToString();
}
return true;
}
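// Records the given loop value's positions and uses as living in alternate
// memory and attaches the uses to the value's most recent allocation. When
// allocate_next_iteration_uses is true, uses from the next iteration are
// mapped onto the corresponding instructions of this iteration's schedule.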
void MemoryBoundLoopOptimizer::AddAllLoopPositionsAndUses(
LoopValue& value, bool allocate_next_iteration_uses) {
CHECK_GE(value.allocations.size(), 1);
Allocation& allocation = *value.allocations.back();
for (const auto& [idx, position] : value.loop_positions) {
positions_in_alternate_mem_[position.instruction].push_back(position.index);
}
for (const auto& [idx, use] : value.loop_uses) {
uses_in_alternate_mem_[use.instruction].push_back(
{use.operand_number, use.operand_index});
allocation.AddUse(use);
}
if (allocate_next_iteration_uses) {
for (const auto& [next_iteration_idx, use] : value.next_iteration_uses) {
HloInstruction* loop_instruction =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + next_iteration_idx);
uses_in_alternate_mem_[loop_instruction].push_back(
{use.operand_number, use.operand_index});
allocation.AddUse(
{loop_instruction, use.operand_number, use.operand_index});
}
}
}
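// Returns the default-memory bandwidth idle time of the instruction at loop
// index idx, given the operands and outputs that are already committed to
// alternate memory.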
float MemoryBoundLoopOptimizer::GetBandwidthIdleTime(int idx) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> empty_operands;
std::vector<ShapeIndex> empty_outputs;
const std::vector<std::pair<int64_t, ShapeIndex>>* operands_in_alternate_mem =
&empty_operands;
const std::vector<ShapeIndex>* outputs_in_alternate_mem = &empty_outputs;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = &uses_it->second;
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = &positions_it->second;
}
return cost_analysis_.GetDefaultMemoryBandwidthIdleTime(
*inst, *operands_in_alternate_mem, *outputs_in_alternate_mem);
}
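// Same as above, but additionally accounts for tentatively placed
// alternate-memory uses and positions that have not been committed yet.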
float MemoryBoundLoopOptimizer::GetBandwidthIdleTime(
int idx,
const absl::flat_hash_map<const HloInstruction*,
std::vector<std::pair<int64_t, ShapeIndex>>>&
additional_uses_in_alternate_mem,
const absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>&
additional_positions_in_alternate_mem) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem;
std::vector<ShapeIndex> outputs_in_alternate_mem;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = uses_it->second;
}
auto additional_uses_it = additional_uses_in_alternate_mem.find(inst);
if (additional_uses_it != additional_uses_in_alternate_mem.end()) {
absl::c_copy(additional_uses_it->second,
std::back_inserter(operands_in_alternate_mem));
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = positions_it->second;
}
auto additional_positions_it =
additional_positions_in_alternate_mem.find(inst);
if (additional_positions_it != additional_positions_in_alternate_mem.end()) {
absl::c_copy(additional_positions_it->second,
std::back_inserter(outputs_in_alternate_mem));
}
return cost_analysis_.GetDefaultMemoryBandwidthIdleTime(
*inst, operands_in_alternate_mem, outputs_in_alternate_mem);
}
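// Returns the elapsed time of the instruction at loop index idx, given the
// operands and outputs currently committed to alternate memory.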
float MemoryBoundLoopOptimizer::GetInstructionElapsed(int idx) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> empty_operands;
std::vector<ShapeIndex> empty_outputs;
const std::vector<std::pair<int64_t, ShapeIndex>>* operands_in_alternate_mem =
&empty_operands;
const std::vector<ShapeIndex>* outputs_in_alternate_mem = &empty_outputs;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = &uses_it->second;
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = &positions_it->second;
}
return cost_analysis_.GetInstructionElapsedInAlternateMemory(
*inst, *operands_in_alternate_mem, *outputs_in_alternate_mem);
}
}
} | #include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
using ::testing::ContainerEq;
using ::testing::HasSubstr;
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
int64_t SizeFunction(const BufferValue& value) {
return ShapeSize(value.shape());
}
int64_t ReservedScopedMemoryFn(
const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
return 0;
}
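// Exercises LoopOptimizerBestFitHeap directly. The constructor arguments
// (64, 6, 8) presumably correspond to the heap size limit in bytes, the loop
// size, and the alignment in bytes. The helpers below return whether both the
// even- and the odd-iteration chunk of an allocation request could be placed
// (or found) in the heap.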
class LoopOptimizerBestFitHeapTest : public ::testing::Test {
public:
LoopOptimizerBestFitHeapTest()
: heap_(64, 6,
8) {}
bool IsAllocateSameEvenAndOddBetweenSuccessful(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.AllocateSameEvenAndOddBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool CanFindSameEvenAndOddAllocationBetween(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.FindSameEvenAndOddAllocationBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool IsAllocateEvenAndOddBetweenSuccessful(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.AllocateEvenAndOddBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool CanFindEvenAndOddAllocationBetween(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.FindEvenAndOddAllocationBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
std::string GetMemoryUsageAsciiArt() { return heap_.MemoryUsageToAsciiArt(); }
protected:
LoopOptimizerBestFitHeap heap_;
};
TEST_F(LoopOptimizerBestFitHeapTest, TestAllocateSameEvenAndOddBetween) {
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(3, 8, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(-3, 2, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 2, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(3, 5, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 48);
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 5, 16));
EXPECT_FALSE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 5, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{0, 0, 0, 0, 0, 0}));
std::string memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(memory_usage, HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 32"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 16"));
EXPECT_THAT(memory_usage, HasSubstr("234567 890123"));
}
TEST_F(LoopOptimizerBestFitHeapTest, TestAllocateEvenAndOddBetween) {
EXPECT_TRUE(IsAllocateEvenAndOddBetweenSuccessful(3, 11, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 32);
EXPECT_TRUE(IsAllocateEvenAndOddBetweenSuccessful(-3, 8, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{16, 16, 16, 0, 0, 0}));
std::string memory_usage = heap_.MemoryUsageToAsciiArt();
EXPECT_THAT(
memory_usage,
HasSubstr(
"Memory map for time: [0,35], memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage,
HasSubstr("...... ...### ###### ###### ###### ###... 64"));
EXPECT_THAT(memory_usage,
HasSubstr("...### ###### ###### ###### ###... ...... 48"));
EXPECT_THAT(memory_usage,
HasSubstr("...... ...### ###### ...### ###### ...... 32"));
EXPECT_THAT(memory_usage,
HasSubstr("...### ###### ...### ###### ...... ...... 16"));
EXPECT_THAT(memory_usage,
HasSubstr("012345 678901 234567 890123 456789 012345"));
}
TEST_F(LoopOptimizerBestFitHeapTest, TestRemoveChunk) {
EvenOddChunkPair chunks = heap_.AllocateEvenAndOddBetween(3, 11, 16);
EXPECT_TRUE(chunks.first.has_value() && chunks.second.has_value());
EvenOddChunkPair second_chunks = heap_.AllocateEvenAndOddBetween(-3, 8, 16);
EXPECT_TRUE(second_chunks.first.has_value() &&
second_chunks.second.has_value());
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{16, 16, 16, 0, 0, 0}));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
std::string memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(memory_usage, HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(memory_usage, HasSubstr("###### ...### 32"));
EXPECT_THAT(memory_usage, HasSubstr("...### ###### 16"));
EXPECT_THAT(memory_usage, HasSubstr("234567 890123"));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 2, 16));
EXPECT_FALSE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 2, 16));
EXPECT_FALSE(CanFindEvenAndOddAllocationBetween(0, 11, 16));
heap_.RemoveEvenOddChunkPair(3, 11, chunks);
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 11, 16));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(-3, 8, 16));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 5, 32));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(-1, 4, 32));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(2, 7, 32));
EXPECT_FALSE(CanFindEvenAndOddAllocationBetween(0, 6, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(0, 5, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(-1, 4, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(2, 7, 32));
std::string updated_memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(updated_memory_usage,
HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(updated_memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(updated_memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(updated_memory_usage, HasSubstr("...... ...... 32"));
EXPECT_THAT(updated_memory_usage, HasSubstr("...... ...... 16"));
EXPECT_THAT(updated_memory_usage, HasSubstr("234567 890123"));
heap_.RemoveEvenOddChunkPair(-3, 8, second_chunks);
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 0);
}
class MemoryBoundLoopOptimizerTest : public HloTestBase {
public:
MemoryBoundLoopOptimizerTest() = default;
protected:
const int64_t kAlternateMemorySpace = 1;
const int64_t kDefaultMemorySpace = 0;
absl::Status Initialize(const HloModule* module,
uint64_t alternate_memory_size = 256) {
HloCostAnalysis::Options options;
MemoryBoundLoopOptimizerOptions optimizer_options;
optimizer_options.set_enabled(true);
optimizer_options.set_desired_copy_ratio(0.7);
optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
optimizer_options.set_min_num_iterations(3.0);
options_.memory_bound_loop_optimizer_options = optimizer_options;
cost_analysis_options_.alternate_mem_bandwidth_bytes_per_second = 128;
cost_analysis_options_.async_copy_bandwidth_bytes_per_second = 32;
cost_analysis_options_.pipeline_overhead_window_size_mib = 1;
options.shape_size = ShapeSize;
options.set_flops_per_second(16);
options.set_bytes_per_second(32);
options.set_transcendentals_per_second(16);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
TF_RETURN_IF_ERROR(
module->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<HloCostAnalysisCosts>(*hlo_cost_analysis_);
TF_ASSIGN_OR_RETURN(cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_,
cost_analysis_options_, *module));
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(live_range_,
HloLiveRange::Run(module->schedule(), *alias_analysis_,
module->entry_computation()));
return absl::OkStatus();
}
absl::StatusOr<MemoryBoundLoopOptimizer*> CreateOptimizer(
int loop_start, int loop_end, const HloModule* module,
uint64_t alternate_memory_size = 256,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
ReservedScopedMemoryFn) {
TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
MemoryBoundLoopOptimizerOptions optimizer_options;
optimizer_options.set_enabled(true);
optimizer_options.set_desired_copy_ratio(0.7);
optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
TF_ASSIGN_OR_RETURN(
optimizer_,
MemoryBoundLoopOptimizer::Create(
loop_start, loop_end, alternate_memory_size, optimizer_options,
*live_range_, *alias_analysis_, *cost_analysis_, SizeFunction,
reserved_scoped_memory_fn));
return optimizer_.get();
}
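  // Convenience wrapper: expands the $op/$param loop template in hlo_loop_str
  // into a full module (see ParseAndCreateModuleString below) and creates a
  // MemoryBoundLoopOptimizer over the middle copy of the loop body.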
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndCreateOptimizer(
absl::string_view hlo_loop_str, uint64_t alternate_memory_size,
int& loop_start_idx, MemoryBoundLoopOptimizer** optimizer,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
ReservedScopedMemoryFn) {
int loop_end_idx;
TF_ASSIGN_OR_RETURN(
std::string module_str,
ParseAndCreateModuleString(hlo_loop_str, loop_start_idx, loop_end_idx));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSIGN_OR_RETURN(
*optimizer,
CreateOptimizer(loop_start_idx, loop_end_idx, module.get(),
alternate_memory_size, reserved_scoped_memory_fn));
return std::move(module);
}
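  // Parses "$opN = ..." and "$paramN" placeholders from hlo_loop_str and emits
  // a scheduled module containing three unrolled copies of the loop body
  // (prev_, current, and next_), reporting the instruction indices that
  // delimit the middle copy via loop_start_idx and loop_end_idx.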
absl::StatusOr<std::string> ParseAndCreateModuleString(
absl::string_view hlo_loop_str, int& loop_start_idx, int& loop_end_idx) {
RE2 op_re("\\$op([0-9]+) += +(\\S+).*");
std::vector<absl::string_view> ops;
std::vector<absl::string_view> op_types;
int begin_pos = 0;
absl::string_view submatch[3];
while (op_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
RE2::UNANCHORED, submatch, 3)) {
for (int i = 0; i < 3; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
int op_num;
if (!absl::SimpleAtoi(submatch[1], &op_num)) {
return InvalidArgument("Op name expects to contain a number, found %s.",
submatch[1]);
}
if (op_num != ops.size()) {
return InvalidArgument("Op number expected to be %d found %d.",
op_types.size(), op_num);
}
ops.push_back(submatch[0]);
op_types.push_back(submatch[2]);
begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
}
RE2 param_re("([[:alnum:]]+\\[\\S*\\]) +\\$param([0-9]+)");
std::vector<absl::string_view> param_types;
begin_pos = 0;
while (param_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
RE2::UNANCHORED, submatch, 3)) {
for (int i = 0; i < 3; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
int param_num;
if (!absl::SimpleAtoi(submatch[2], ¶m_num)) {
return InvalidArgument(
"Param name expects to contain a number, found %s.", submatch[2]);
}
while (param_num >= param_types.size()) {
param_types.push_back({});
}
param_types[param_num] = submatch[1];
begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
}
RE2 root_re("ROOT \\$root += +tuple\\((.*)\\)");
absl::string_view root_values;
if (root_re.Match(hlo_loop_str, 0, hlo_loop_str.size(), RE2::UNANCHORED,
submatch, 2)) {
for (int i = 0; i < 2; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
root_values = submatch[1];
}
for (absl::string_view op_type : op_types) {
VLOG(4) << "op_type: " << op_type;
}
for (absl::string_view param_type : param_types) {
VLOG(4) << "param_type: " << param_type;
}
std::string hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
)";
int total_instructions = 0;
for (absl::string_view param_prefix : {"prev_", "", "next_"}) {
for (int i = 0; i < param_types.size(); ++i) {
int parameter_number = total_instructions;
absl::StrAppend(&hlo_string, " ", param_prefix, "param", i, " = ",
param_types[i], " parameter(", parameter_number,
")
}
}
for (int i = 0; i < op_types.size(); ++i) {
int parameter_number = total_instructions;
absl::StrAppend(&hlo_string, " ", "prev_prev_op", i, " = ", op_types[i],
" parameter(", parameter_number, ")
total_instructions++, "\n");
}
std::string new_root_values;
auto print_ops =
[&](const std::vector<std::pair<const absl::string_view, std::string>>&
replacements) {
for (int i = 0; i < ops.size(); ++i) {
absl::StrAppend(&hlo_string, " ",
                            absl::StrReplaceAll(ops[i], replacements), "  // ",
total_instructions++, "\n");
}
if (!root_values.empty()) {
absl::StrAppend(&new_root_values,
new_root_values.empty() ? "" : ", ",
absl::StrReplaceAll(root_values, replacements));
}
};
std::vector<std::pair<const absl::string_view, std::string>>
prev_replacements;
prev_replacements.push_back({"$prev_op", "prev_prev_op"});
prev_replacements.push_back({"$op", "prev_op"});
prev_replacements.push_back({"$param", "prev_param"});
absl::StrAppend(&hlo_string, "
print_ops(prev_replacements);
loop_start_idx = total_instructions;
std::vector<std::pair<const absl::string_view, std::string>> replacements;
replacements.push_back({"$", ""});
absl::StrAppend(&hlo_string, "
print_ops(replacements);
loop_end_idx = total_instructions;
std::vector<std::pair<const absl::string_view, std::string>>
next_replacements;
next_replacements.push_back({"$prev_op", "op"});
next_replacements.push_back({"$op", "next_op"});
next_replacements.push_back({"$param", "next_param"});
absl::StrAppend(&hlo_string, "
print_ops(next_replacements);
absl::StrAppend(&hlo_string, " ROOT root = tuple(", new_root_values,
")\n");
absl::StrAppend(&hlo_string, "}");
VLOG(1) << hlo_string;
return hlo_string;
}
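  // Runs full memory space assignment on the module using the cost analysis
  // and prefetch interval picker configured by this fixture.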
absl::StatusOr<std::unique_ptr<PresetAssignments>> RunMsa(
HloModule* module, uint64_t alternate_memory_size = 256) {
options_.max_size_in_bytes = alternate_memory_size;
options_.alignment_in_bytes = 8;
options_.verify = true;
options_.alternate_memory_space = kAlternateMemorySpace;
if (!cost_analysis_) {
TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
}
CostAnalysis::Cache cache;
MemoryBoundednessBufferIntervalComparator comparator(*cost_analysis_,
&cache);
options_.buffer_interval_comparator = &comparator;
CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
CostAnalysisPrefetchIntervalPicker(
*cost_analysis_, 0.8,
1.5,
10.0,
alternate_memory_size));
options_.prefetch_interval_picker = &prefetch_interval_picker;
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
options_.size_fn = size_fn;
auto is_allowed_in_alternate_mem = [](const HloValue& value) {
HloInstruction* instruction = value.instruction();
HloComputation* computation = instruction->parent();
bool in_entry_computation =
(computation == computation->parent()->entry_computation());
if (in_entry_computation &&
instruction->opcode() == HloOpcode::kParameter) {
return false;
}
return true;
};
options_.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
options_.max_outstanding_prefetches = -1;
options_.max_outstanding_evictions = -1;
options_.cost_analysis = cost_analysis_.get();
std::unique_ptr<PresetAssignments> preset_assignments =
MemorySpaceAssignment::Run(module, *live_range_, *alias_analysis_,
options_)
.value();
return preset_assignments;
}
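  // Checks that the memory spaces and copy-start positions produced by MSA
  // agree with the allocations chosen by the loop optimizer. When
  // expect_unsupported_allocations is true, uses without a corresponding
  // loop-optimizer allocation are tolerated.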
absl::Status VerifyMsaEquivalence(
HloModule* module, bool expect_unsupported_allocations = false) {
absl::flat_hash_map<std::pair<int, int>, const Allocation*> allocation_map;
for (const MemoryBoundLoopOptimizer::LoopValue& value :
optimizer_->loop_values()) {
if (!value.IsAllocationTypeSupported()) {
continue;
}
for (const auto& allocation : value.allocations) {
for (const HloUse& use : allocation->uses()) {
absl::string_view inst_name = use.instruction->name();
TF_RET_CHECK(absl::StartsWith(inst_name, "op"));
int inst_number;
TF_RET_CHECK(absl::SimpleAtoi(inst_name.substr(2), &inst_number));
allocation_map[{inst_number, use.operand_number}] = allocation.get();
}
}
}
auto get_inst_prefix_in_iter = [](int iteration) {
switch (iteration) {
case 0:
return "prev_";
case 1:
return "";
case 2:
return "next_";
default:
LOG(FATAL) << "Invalid iteration " << iteration;
return "INVALID";
}
};
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const auto& flattened_instructions =
live_range->flattened_instruction_sequence().instructions();
for (int iteration = 1; iteration < 3; ++iteration) {
for (int inst_number = 0; inst_number < optimizer_->loop_size();
++inst_number) {
HloInstruction* inst = FindInstruction(
module, absl::StrCat(get_inst_prefix_in_iter(iteration), "op",
inst_number));
for (int operand_number = 0; operand_number < 2; ++operand_number) {
const HloInstruction* operand = inst->operand(operand_number);
LOG(INFO) << inst->name() << ", operand " << operand_number;
if (!allocation_map.contains({inst_number, operand_number})) {
TF_RET_CHECK(expect_unsupported_allocations);
continue;
}
const Allocation* allocation =
allocation_map.at({inst_number, operand_number});
if (!allocation->is_copy_allocation()) {
EXPECT_NE(operand->opcode(), HloOpcode::kCopyDone);
int expected_memory_space =
allocation->memory_space() == MemorySpace::kDefault
? kDefaultMemorySpace
: kAlternateMemorySpace;
EXPECT_EQ(operand->shape().layout().memory_space(),
expected_memory_space);
} else {
EXPECT_EQ(allocation->memory_space(), MemorySpace::kAlternate);
TF_RET_CHECK(operand->opcode() == HloOpcode::kCopyDone);
const CopyAllocation* copy_allocation =
static_cast<const CopyAllocation*>(allocation);
if (copy_allocation->copy_done_schedule_before() != inst_number) {
EXPECT_NE(allocation->uses().front(),
(HloUse{inst, operand_number}));
continue;
}
int expected_copy_start_iteration = iteration;
if (copy_allocation->copy_start_schedule_after() ==
optimizer_->loop_size() &&
copy_allocation->copy_done_schedule_before() == 0) {
expected_copy_start_iteration -= 2;
} else if (copy_allocation->copy_start_schedule_after() + 1 >=
copy_allocation->copy_done_schedule_before()) {
expected_copy_start_iteration -= 1;
}
if (expected_copy_start_iteration >= 0) {
const HloInstruction* expected_copy_start_schedule_after =
FindInstruction(
module,
absl::StrCat(
get_inst_prefix_in_iter(
expected_copy_start_iteration),
"op", copy_allocation->copy_start_schedule_after()));
LOG(INFO) << "Expected copy start schedule after: "
<< expected_copy_start_schedule_after->name();
const HloInstruction* copy_start = operand->operand(0);
TF_RET_CHECK(copy_start->opcode() == HloOpcode::kCopyStart);
int copy_start_idx =
live_range->instruction_schedule().at(copy_start);
const HloInstruction* copy_start_schedule_after = nullptr;
for (int i = copy_start_idx - 1; i >= 0; --i) {
HloOpcode opcode = flattened_instructions.at(i)->opcode();
if (opcode != HloOpcode::kCopyStart &&
opcode != HloOpcode::kCopyDone &&
opcode != HloOpcode::kGetTupleElement &&
opcode != HloOpcode::kParameter) {
copy_start_schedule_after = flattened_instructions.at(i);
break;
}
}
TF_RET_CHECK(copy_start_schedule_after != nullptr);
EXPECT_EQ(copy_start_schedule_after,
expected_copy_start_schedule_after);
}
}
}
}
}
return absl::OkStatus();
}
private:
Options options_;
CostAnalysisOptions cost_analysis_options_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<HloCostAnalysisCosts> hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> live_range_;
std::unique_ptr<MemoryBoundLoopOptimizer> optimizer_;
};
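// param0 is only consumed late in the loop (at op4), so the optimizer is
// expected to prefetch it into alternate memory; the test also checks that
// every use is covered by at most one allocation.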
TEST_F(MemoryBoundLoopOptimizerTest, SimplePrefetch) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 64;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
absl::flat_hash_set<HloUse> seen_uses;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << loop_value.ToString();
if (loop_value.hlo_values.front()
->defining_position()
.instruction->name() == "param0") {
EXPECT_TRUE(loop_value.allocations.back()->is_copy_allocation());
}
for (const auto& allocation : loop_value.allocations) {
for (const HloUse& use : allocation->uses()) {
EXPECT_FALSE(seen_uses.contains(use)) << use.ToString();
seen_uses.insert(use);
}
}
}
for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
HloInstruction* inst =
module->entry_computation()->GetInstructionWithName(inst_name);
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 0})) << inst_name;
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 1})) << inst_name;
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 1.875);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, ReservedScopedMemory) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndCreateOptimizer(
hlo_loop_str,
          /*alternate_memory_size=*/128, loop_start_idx, &optimizer,
[](const HloInstruction*,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&,
const absl::flat_hash_set<ShapeIndex>&) { return 128; }));
optimizer->Optimize();
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << "Loop value: " << loop_value.ToString();
for (const auto& allocation : loop_value.allocations) {
ASSERT_NE(static_cast<int64_t>(allocation->memory_space()),
kAlternateMemorySpace);
}
}
}
TEST_F(MemoryBoundLoopOptimizerTest, GetTupleElement) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = f32[1,4] parameter(5)
p6 = f32[1,4] parameter(6)
tupleparam = (f32[1,4], f32[1,4]) parameter(7)
op1 = tanh(p0)
op2 = tanh(p1)
op3 = tanh(op2)
op4 = add(op1, op3)
op5 = tanh(p2)
op6 = tanh(p3)
op7 = tanh(op6)
op8 = add(op5, op7)
op9 = tanh(p4)
op10 = tanh(p5)
op11 = tanh(op10)
op12 = add(op9, op11)
op13 = tanh(p6)
gte = get-tuple-element(tupleparam), index=1
op14 = tanh(gte)
op15 = tanh(op14)
op16 = add(op13, op15)
ROOT root = tuple(tupleparam, op4, op8, op12, op16)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments, RunMsa(module.get()));
}
TEST_F(MemoryBoundLoopOptimizerTest, NoAlternateMem) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndCreateOptimizer(hlo_loop_str,
                                                  /*alternate_memory_size=*/0,
loop_start_idx, &optimizer));
optimizer->Optimize();
absl::flat_hash_set<HloUse> seen_uses;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << loop_value.ToString();
for (const auto& allocation : loop_value.allocations) {
EXPECT_EQ(allocation->memory_space(), MemorySpace::kDefault);
for (const HloUse& use : allocation->uses()) {
EXPECT_FALSE(seen_uses.contains(use)) << use.ToString();
seen_uses.insert(use);
}
}
}
for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
HloInstruction* inst =
module->entry_computation()->GetInstructionWithName(inst_name);
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 0})) << inst_name;
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 1})) << inst_name;
}
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithOverlap) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 432;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 3);
bool seen_overlap = false;
bool seen_nonoverlap = false;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op14") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 14);
EXPECT_EQ(prefetch->copy_start_schedule_after(), 0);
} else {
ASSERT_EQ(use.instruction->name(), "op1");
EXPECT_EQ(prefetch->copy_done_schedule_before(), 1);
if (prefetch->copy_start_schedule_after() == 0) {
EXPECT_FALSE(seen_overlap);
seen_overlap = true;
} else {
EXPECT_GT(prefetch->copy_start_schedule_after(), 1);
EXPECT_FALSE(seen_nonoverlap);
seen_nonoverlap = true;
}
}
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 12.5);
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0),
alternate_memory_size - (3 * 16 + 128 + 128));
EXPECT_EQ(remaining_memory.at(1),
alternate_memory_size - (2 * 16 + 2 * 128 + 128 + 16));
EXPECT_EQ(remaining_memory.at(2),
alternate_memory_size - (3 * 16 + 128 + 16));
EXPECT_EQ(remaining_memory.at(3),
alternate_memory_size - (3 * 16 + 128 + 16));
for (int i = 4; i <= 13; ++i) {
EXPECT_EQ(remaining_memory.at(i),
alternate_memory_size - (3 * 16 + 128 + 128 + 16));
}
EXPECT_EQ(remaining_memory.at(14),
alternate_memory_size - (2 * 16 + 128 + 128 + 16));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithoutOverlap) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 192;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 2);
std::optional<int> expected_op14_copy_start_time;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op1") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 1);
EXPECT_GT(prefetch->copy_start_schedule_after(), 1);
expected_op14_copy_start_time = prefetch->copy_start_schedule_after();
}
}
EXPECT_TRUE(expected_op14_copy_start_time.has_value());
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op14") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 14);
EXPECT_EQ(prefetch->copy_start_schedule_after(),
*expected_op14_copy_start_time);
}
}
EXPECT_GT(optimizer->CalculateExecutionTime(), 12.5);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithOverlap2) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op1 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $op12, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 432;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 3);
bool seen_overlap = false;
bool seen_nonoverlap = false;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op13") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 13);
EXPECT_EQ(prefetch->copy_start_schedule_after(), 14);
} else {
ASSERT_EQ(use.instruction->name(), "op0");
EXPECT_EQ(prefetch->copy_done_schedule_before(), 0);
if (prefetch->copy_start_schedule_after() == 14) {
EXPECT_FALSE(seen_overlap);
seen_overlap = true;
} else {
EXPECT_LT(prefetch->copy_start_schedule_after(), 14);
EXPECT_FALSE(seen_nonoverlap);
seen_nonoverlap = true;
}
}
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 12.5);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEnd) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
ROOT $root = tuple($op1, $op14)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str,
                                           /*alternate_memory_size=*/1024,
loop_start_idx, &optimizer));
optimizer->Optimize();
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 1024));
TF_ASSERT_OK(VerifyMsaEquivalence(module.get()));
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndUnsupportedAllocation) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op2, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
ROOT $root = tuple($op1, $op4)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str,
                                           /*alternate_memory_size=*/1024,
loop_start_idx, &optimizer));
optimizer->Optimize();
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 1024));
TF_ASSERT_OK(VerifyMsaEquivalence(module.get(),
                                    /*expect_unsupported_allocations=*/true));
const HloInstruction* op2 = FindInstruction(module.get(), "op2");
EXPECT_EQ(op2->shape().layout().memory_space(), kAlternateMemorySpace);
}
TEST_F(MemoryBoundLoopOptimizerTest, TempAndPinnedAllocations) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=5
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
pinned_prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=1
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=5
ROOT root = tuple(pinned_prev_param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = pred[] parameter(4)
copy = f32[1,4] copy(p3)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, copy, p4)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
int64_t alternate_memory_size = 64;
TF_ASSERT_OK_AND_ASSIGN(
auto optimizer,
CreateOptimizer(19, 24, module.get(), alternate_memory_size));
optimizer->Optimize();
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(1), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(2), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(3), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(4), alternate_memory_size - (2 * 16 + 16));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, NegativeSavingNotPinned) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=5
}
while_body {
while_body_param = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
pinned_prev_param0 = f32[28,4] get-tuple-element(while_body_param), index=0
zero = s32[] constant(0)
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=1
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
pinned_slice = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
prev_op4 = f32[1,4] multiply(f32[1,4] pinned_slice, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
pinned_slice2 = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
op4 = f32[1,4] multiply(f32[1,4] pinned_slice2, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
pinned_slice3 = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
next_op4 = f32[1,4] multiply(f32[1,4] pinned_slice3, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=5
ROOT root = tuple(pinned_prev_param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[28,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = pred[] parameter(4)
copy = f32[1,4] copy(p3)
tuple = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, copy, p4)
while = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
int64_t alternate_memory_size = 52;
TF_ASSERT_OK_AND_ASSIGN(
auto optimizer,
CreateOptimizer(21, 27, module.get(), alternate_memory_size));
optimizer->Optimize();
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0), alternate_memory_size - (3 * 16 + 4));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndWhileLoop) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=6
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
param0 = f32[1,4] get-tuple-element(while_body_param), index=1
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=4
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_op4 = f32[1,4] multiply(f32[1,4] prev_param0, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
op4 = f32[1,4] multiply(f32[1,4] param0, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_op4 = f32[1,4] multiply(f32[1,4] next_param0, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=6
ROOT root = tuple(prev_param0, param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = pred[] parameter(5)
copy = f32[1,4] copy(p4)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, p4, copy, p5)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 512));
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* prev_copy_done =
FindInstruction(module.get(), "prev_op4")->operand(0);
const HloInstruction* copy_done =
FindInstruction(module.get(), "op4")->operand(0);
const HloInstruction* next_copy_done =
FindInstruction(module.get(), "next_op4")->operand(0);
ASSERT_EQ(prev_copy_done->opcode(), HloOpcode::kCopyDone);
ASSERT_EQ(copy_done->opcode(), HloOpcode::kCopyDone);
ASSERT_EQ(next_copy_done->opcode(), HloOpcode::kCopyDone);
EXPECT_EQ(prev_copy_done->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(copy_done->shape().layout().memory_space(), kAlternateMemorySpace);
EXPECT_EQ(next_copy_done->shape().layout().memory_space(),
kAlternateMemorySpace);
auto prefetch_distance = [&](const HloInstruction* copy_done) {
return hlo_live_range->instruction_schedule().at(copy_done) -
hlo_live_range->instruction_schedule().at(copy_done->operand(0));
};
EXPECT_EQ(prefetch_distance(prev_copy_done), prefetch_distance(copy_done));
EXPECT_EQ(prefetch_distance(next_copy_done), prefetch_distance(copy_done));
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndNestedWhileLoopBug) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
prev_while_cond {
prev_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(prev_while_cond_param), index=1
}
prev_while_body {
prev_while_body_param = (f32[1,4], pred[]) parameter(0)
prev_while_body_gte = f32[1,4] get-tuple-element(prev_while_body_param), index=0
prev_while_body_pred = pred[] get-tuple-element(prev_while_body_param), index=1
prev_while_body_op = f32[1,4] negate(prev_while_body_gte)
ROOT prev_while_body_root = (f32[1,4], pred[]) tuple(prev_while_body_op, prev_while_body_pred)
}
current_while_cond {
current_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(current_while_cond_param), index=1
}
current_while_body {
current_while_body_param = (f32[1,4], pred[]) parameter(0)
current_while_body_gte = f32[1,4] get-tuple-element(current_while_body_param), index=0
current_while_body_pred = pred[] get-tuple-element(current_while_body_param), index=1
current_while_body_op = f32[1,4] negate(current_while_body_gte)
ROOT current_while_body_root = (f32[1,4], pred[]) tuple(current_while_body_op, current_while_body_pred)
}
next_while_cond {
next_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(next_while_cond_param), index=1
}
next_while_body {
next_while_body_param = (f32[1,4], pred[]) parameter(0)
next_while_body_gte = f32[1,4] get-tuple-element(next_while_body_param), index=0
next_while_body_pred = pred[] get-tuple-element(next_while_body_param), index=1
next_while_body_op = f32[1,4] negate(next_while_body_gte)
ROOT next_while_body_root = (f32[1,4], pred[]) tuple(next_while_body_op, next_while_body_pred)
}
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=6
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
param0 = f32[1,4] get-tuple-element(while_body_param), index=1
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=4
while_pred = pred[] get-tuple-element(while_body_param), index=6
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_tuple = (f32[1,4], pred[]) tuple(prev_op3, while_pred)
prev_while = (f32[1,4], pred[]) while(prev_tuple), condition=prev_while_cond, body=prev_while_body
prev_gte = f32[1,4] get-tuple-element(prev_while), index=0
prev_op4 = f32[1,4] multiply(f32[1,4] prev_param0, f32[1,4] prev_gte)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
current_tuple = (f32[1,4], pred[]) tuple(op3, while_pred)
current_while = (f32[1,4], pred[]) while(current_tuple), condition=current_while_cond, body=current_while_body
current_gte = f32[1,4] get-tuple-element(current_while), index=0
op4 = f32[1,4] multiply(f32[1,4] param0, f32[1,4] current_gte)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_tuple = (f32[1,4], pred[]) tuple(next_op3, while_pred)
next_while = (f32[1,4], pred[]) while(next_tuple), condition=next_while_cond, body=next_while_body
next_gte = f32[1,4] get-tuple-element(next_while), index=0
next_op4 = f32[1,4] multiply(f32[1,4] next_param0, f32[1,4] next_gte)
ROOT root = tuple(prev_param0, param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, while_pred)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = pred[] parameter(5)
copy = f32[1,4] copy(p4)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, p4, copy, p5)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 512));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
53563334-ed7d-4388-a521-27a3effba8b4 | cpp | tensorflow/tensorflow | algorithm | tensorflow/core/graph/algorithm.cc | tensorflow/core/graph/algorithm_test.cc | #include "tensorflow/core/graph/algorithm.h"
#include <algorithm>
#include <deque>
#include <vector>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
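// Iterative depth-first search from `start`. `enter` is called when a node is
// first visited and `leave` after all of its out-edges have been processed
// (implemented by re-pushing the node with a `leave` marker).
// `stable_comparator` makes the visit order deterministic and `edge_filter`,
// when set, restricts which out-edges are followed.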
template <typename T>
void DFSFromHelper(const Graph& g, gtl::ArraySlice<T> start,
const std::function<void(T)>& enter,
const std::function<void(T)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
struct Work {
T node;
bool leave;
};
std::vector<Work> stack(start.size());
for (int i = 0; i < start.size(); ++i) {
stack[i] = Work{start[i], false};
}
std::vector<bool> visited(g.num_node_ids(), false);
while (!stack.empty()) {
Work w = stack.back();
stack.pop_back();
T n = w.node;
if (w.leave) {
leave(n);
continue;
}
if (visited[n->id()]) continue;
visited[n->id()] = true;
if (enter) enter(n);
if (leave) stack.push_back(Work{n, true});
auto add_work = [&visited, &stack](Node* out) {
if (!visited[out->id()]) {
stack.push_back(Work{out, false});
}
};
if (stable_comparator) {
std::vector<Node*> nodes_sorted;
for (const Edge* out_edge : n->out_edges()) {
if (!edge_filter || edge_filter(*out_edge)) {
nodes_sorted.emplace_back(out_edge->dst());
}
}
std::sort(nodes_sorted.begin(), nodes_sorted.end(), stable_comparator);
for (Node* out : nodes_sorted) {
add_work(out);
}
} else {
for (const Edge* out_edge : n->out_edges()) {
if (!edge_filter || edge_filter(*out_edge)) {
add_work(out_edge->dst());
}
}
}
}
}
}
void DFS(const Graph& g, const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
DFSFromHelper(g, {g.source_node()}, enter, leave, stable_comparator,
edge_filter);
}
void DFSFrom(const Graph& g, absl::Span<Node* const> start,
const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
DFSFromHelper(g, start, enter, leave, stable_comparator, edge_filter);
}
void DFSFrom(const Graph& g, absl::Span<const Node* const> start,
const std::function<void(const Node*)>& enter,
const std::function<void(const Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
DFSFromHelper(g, start, enter, leave, stable_comparator, edge_filter);
}
void ReverseDFS(const Graph& g, const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
ReverseDFSFrom(g, {g.sink_node()}, enter, leave, stable_comparator,
edge_filter);
}
namespace {
template <typename T>
void ReverseDFSFromHelper(const Graph& g, gtl::ArraySlice<T> start,
const std::function<void(T)>& enter,
const std::function<void(T)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
struct Work {
T node;
bool leave;
};
std::vector<Work> stack(start.size());
for (int i = 0; i < start.size(); ++i) {
stack[i] = Work{start[i], false};
}
std::vector<bool> visited(g.num_node_ids(), false);
while (!stack.empty()) {
Work w = stack.back();
stack.pop_back();
T n = w.node;
if (w.leave) {
leave(n);
continue;
}
if (visited[n->id()]) continue;
visited[n->id()] = true;
if (enter) enter(n);
if (leave) stack.push_back(Work{n, true});
auto add_work = [&visited, &stack](T out) {
if (!visited[out->id()]) {
stack.push_back(Work{out, false});
}
};
if (stable_comparator) {
std::vector<T> nodes_sorted;
for (const Edge* in_edge : n->in_edges()) {
if (!edge_filter || edge_filter(*in_edge)) {
nodes_sorted.emplace_back(in_edge->src());
}
}
std::sort(nodes_sorted.begin(), nodes_sorted.end(), stable_comparator);
for (T in : nodes_sorted) {
add_work(in);
}
} else {
for (const Edge* in_edge : n->in_edges()) {
if (!edge_filter || edge_filter(*in_edge)) {
add_work(in_edge->src());
}
}
}
}
}
}
void ReverseDFSFrom(const Graph& g, absl::Span<const Node* const> start,
const std::function<void(const Node*)>& enter,
const std::function<void(const Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
ReverseDFSFromHelper(g, start, enter, leave, stable_comparator, edge_filter);
}
void ReverseDFSFrom(const Graph& g, absl::Span<Node* const> start,
const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
ReverseDFSFromHelper(g, start, enter, leave, stable_comparator, edge_filter);
}
void GetPostOrder(const Graph& g, std::vector<Node*>* order,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
order->clear();
DFS(
g, nullptr, [order](Node* n) { order->push_back(n); }, stable_comparator,
edge_filter);
}
void GetReversePostOrder(const Graph& g, std::vector<Node*>* order,
const NodeComparator& stable_comparator,
const EdgeFilter& edge_filter) {
GetPostOrder(g, order, stable_comparator, edge_filter);
std::reverse(order->begin(), order->end());
}
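// Breadth-first walk backwards over in-edges from `start`; every node that is
// not reverse-reachable (other than the source and sink nodes) is removed from
// the graph. Returns true if any node was removed.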
bool PruneForReverseReachability(Graph* g,
std::unordered_set<const Node*> start) {
std::vector<bool> visited(g->num_node_ids());
for (auto node : start) {
visited[node->id()] = true;
}
std::deque<const Node*> queue(start.begin(), start.end());
while (!queue.empty()) {
const Node* n = queue.front();
queue.pop_front();
for (const Node* in : n->in_nodes()) {
if (!visited[in->id()]) {
visited[in->id()] = true;
queue.push_back(in);
VLOG(2) << "Reverse reach : " << n->name() << " from " << in->name();
}
}
}
bool any_removed = false;
for (int i = 0; i < visited.size(); ++i) {
if (!visited[i]) {
Node* n = g->FindNodeId(i);
if (n != nullptr && !n->IsSource() && !n->IsSink()) {
g->RemoveNode(n);
any_removed = true;
}
}
}
return any_removed;
}
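// Ensures every node is connected to the special source and sink nodes: nodes
// with no in-edges get a control edge from the source, and nodes with no
// out-edges get a control edge to the sink. Returns true if any edge was added.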
bool FixupSourceAndSinkEdges(Graph* g) {
bool changed = false;
for (Node* n : g->nodes()) {
if (!n->IsSource() && n->in_edges().empty()) {
      g->AddControlEdge(g->source_node(), n, /*allow_duplicates=*/true);
changed = true;
}
if (!n->IsSink() && n->out_edges().empty()) {
      g->AddControlEdge(n, g->sink_node(), /*allow_duplicates=*/true);
changed = true;
}
}
return changed;
}
namespace {
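// Breadth-first traversal helper. When `start` is empty, the queue is seeded
// with all root nodes (nodes with no in-edges); successors are visited in the
// order imposed by `stable_comparator`. Note that, as written here, a
// non-empty `start` span does not seed the queue.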
template <class T>
void BreadthFirstTraversalHelper(const Graph& g, gtl::ArraySlice<T> start,
const std::function<void(T)>& visit,
NodeComparator stable_comparator) {
std::deque<T> stack;
if (start.empty()) {
for (T n : g.nodes()) {
if (n->in_edges().empty()) {
stack.push_back(n);
}
}
}
std::vector<bool> seen(g.num_node_ids(), false);
while (!stack.empty()) {
T n = stack.front();
stack.pop_front();
seen[n->id()] = true;
visit(n);
std::vector<T> nodes_sorted;
for (const Edge* out_edge : n->out_edges()) {
if (!seen[out_edge->dst()->id()]) {
seen[out_edge->dst()->id()] = true;
nodes_sorted.emplace_back(out_edge->dst());
}
}
std::sort(nodes_sorted.begin(), nodes_sorted.end(), stable_comparator);
for (T out : nodes_sorted) {
stack.push_back(out);
}
}
}
}
void BreadthFirstTraversal(const Graph& g, absl::Span<const Node* const> start,
const std::function<void(const Node*)>& visit,
NodeComparator stable_comparator) {
return BreadthFirstTraversalHelper<const Node*>(g, start, visit,
stable_comparator);
}
void BreadthFirstTraversal(Graph& g, absl::Span<Node* const> start,
const std::function<void(Node*)>& visit,
NodeComparator stable_comparator) {
return BreadthFirstTraversalHelper<Node*>(g, start, visit, stable_comparator);
}
} | #include "tensorflow/core/graph/algorithm.h"
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
REGISTER_OP("TestUnary").Input("a: float").Output("o: float");
REGISTER_OP("TestBinary")
.Input("a: float")
.Input("b: float")
.Output("o: float");
bool ExpectBefore(const std::vector<std::pair<string, string>>& ordered_pairs,
const std::vector<Node*>& inputs, string* error) {
for (const std::pair<string, string>& pair : ordered_pairs) {
const string& before_node = pair.first;
const string& after_node = pair.second;
bool seen_before = false;
bool seen_both = false;
for (const Node* node : inputs) {
if (!seen_before && after_node == node->name()) {
*error = strings::StrCat("Saw ", after_node, " before ", before_node);
return false;
}
if (before_node == node->name()) {
seen_before = true;
} else if (after_node == node->name()) {
seen_both = seen_before;
break;
}
}
if (!seen_both) {
*error = strings::StrCat("didn't see either ", before_node, " or ",
after_node);
return false;
}
}
return true;
}
TEST(AlgorithmTest, ReversePostOrder) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* w1 = SourceOp("TestParams", b.opts().WithName("W1"));
Node* w2 = SourceOp("TestParams", b.opts().WithName("W2"));
Node* input =
SourceOp("TestInput", b.opts().WithName("input").WithControlInput(w1));
Node* t1 = BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t1"));
BinaryOp("TestMul", w1, {input, 1},
b.opts().WithName("t2").WithControlInput(t1));
BinaryOp("TestMul", w2, {input, 1}, b.opts().WithName("t3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
GetReversePostOrder(g, &order);
std::vector<std::pair<string, string>> reverse_orders = {
{"W1", "input"}, {"W1", "t1"}, {"W1", "t2"}, {"W1", "t3"},
{"input", "t1"}, {"input", "t3"}, {"t1", "t2"}, {"W2", "t3"}};
string error;
EXPECT_TRUE(ExpectBefore(reverse_orders, order, &error)) << error;
reverse_orders = {{"input", "W1"}};
EXPECT_FALSE(ExpectBefore(reverse_orders, order, &error));
GetPostOrder(g, &order);
std::vector<std::pair<string, string>> orders = {
{"input", "W1"}, {"t1", "W1"}, {"t2", "W1"}, {"t3", "W1"},
{"t1", "input"}, {"t3", "input"}, {"t2", "t1"}, {"t3", "W2"}};
EXPECT_TRUE(ExpectBefore(orders, order, &error)) << error;
orders = {{"W1", "t3"}};
EXPECT_FALSE(ExpectBefore(orders, order, &error));
}
TEST(AlgorithmTest, ReversePostOrderStable) {
int64_t run_count = 100;
using namespace ::tensorflow::ops;
for (int64_t i = 0; i < run_count; ++i) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
string error;
Node* w1 = SourceOp("TestParams", b.opts().WithName("W1"));
Node* input =
SourceOp("TestInput", b.opts().WithName("input").WithControlInput(w1));
BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t2"));
for (int64_t j = 0; j < i; ++j) {
BinaryOp("TestMul", w1, {input, 1},
b.opts().WithName(strings::StrCat("internal", j)));
}
BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
GetReversePostOrder(g, &order, NodeComparatorName());
EXPECT_TRUE(ExpectBefore({{"t2", "t3"}}, order, &error));
}
}
TEST(AlgorithmTest, PostOrderWithEdgeFilter) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* n0 = ops::SourceOp("TestParams", b.opts().WithName("n0"));
Node* n1 = ops::UnaryOp("TestUnary", n0, b.opts().WithName("n1"));
Node* n2 = ops::UnaryOp("TestUnary", n1, b.opts().WithName("n2"));
Node* n3 = ops::BinaryOp("TestBinary", n2, n0, b.opts().WithName("n3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
g.AddEdge(g.FindNodeId(n3->id()), 0, g.FindNodeId(n1->id()), 1);
std::vector<Node*> post_order;
auto edge_filter = [&](const Edge& e) {
return !(e.src()->id() == n3->id() && e.dst()->id() == n1->id());
};
std::vector<Node*> expected_post_order = {
g.sink_node(), g.FindNodeId(n3->id()), g.FindNodeId(n2->id()),
g.FindNodeId(n1->id()), g.FindNodeId(n0->id()), g.source_node()};
std::vector<Node*> expected_reverse_post_order = expected_post_order;
std::reverse(expected_reverse_post_order.begin(),
expected_reverse_post_order.end());
  GetPostOrder(g, &post_order, /*stable_comparator=*/{}, edge_filter);
ASSERT_EQ(expected_post_order.size(), post_order.size());
for (int i = 0; i < post_order.size(); i++) {
CHECK_EQ(post_order[i], expected_post_order[i])
<< post_order[i]->name() << " vs. " << expected_post_order[i]->name();
}
std::vector<Node*> reverse_post_order;
  GetReversePostOrder(g, &reverse_post_order, /*stable_comparator=*/{}, edge_filter);
ASSERT_EQ(expected_reverse_post_order.size(), reverse_post_order.size());
for (int i = 0; i < reverse_post_order.size(); i++) {
CHECK_EQ(reverse_post_order[i], expected_reverse_post_order[i])
<< reverse_post_order[i]->name() << " vs. "
<< expected_reverse_post_order[i]->name();
}
}
void BM_PruneForReverseReachability(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
for (auto s : state) {
state.PauseTiming();
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
std::unordered_set<const Node*> visited;
visited.insert(graph.FindNodeId(graph.num_nodes() - 1));
state.ResumeTiming();
PruneForReverseReachability(&graph, std::move(visited));
}
}
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 16);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/algorithm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/algorithm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f32ec9fc-38c2-4fba-b539-b8d6306a2b4b | cpp | tensorflow/tensorflow | slice | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/slice.cc | tensorflow/lite/delegates/xnnpack/slice_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/slice.h"
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
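// Packs N scalar index tensors into a single rank-1 tensor of length N via
// tfl.pack along axis 0; TFL slice ops take their start indices as one
// tensor, unlike the per-dimension scalars used by MHLO.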
Value PackScalarIndices(mlir::ValueRange indices, OpBuilder& b) {
auto e_type =
llvm::cast<ShapedType>(indices.front().getType()).getElementType();
const int64_t num_indices = indices.size();
auto packed_indices_type = RankedTensorType::get({num_indices}, e_type);
auto values_count_attr = b.getI32IntegerAttr(num_indices);
auto pack_axis_attr = b.getI32IntegerAttr(0);
return b.create<TFL::PackOp>(indices.back().getLoc(), packed_indices_type,
indices, values_count_attr, pack_axis_attr);
}
Value BuildTFLCastOp(OpBuilder& b, Value value) {
return b.create<TFL::CastOp>(
value.getLoc(),
RankedTensorType::get(llvm::cast<ShapedType>(value.getType()).getShape(),
b.getI32Type()),
value);
}
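// Lowers mhlo.slice to tfl.strided_slice: the static start/limit/stride
// attributes are materialized as constants, cast to i32, and passed with all
// mask attributes set to 0.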
class LegalizeSliceOp : public OpConversionPattern<mhlo::SliceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::SliceOp slice_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
auto begin = rewriter.create<arith::ConstantOp>(slice_op.getLoc(),
slice_op.getStartIndices());
auto end = rewriter.create<arith::ConstantOp>(slice_op.getLoc(),
slice_op.getLimitIndices());
auto strides = rewriter.create<arith::ConstantOp>(slice_op.getLoc(),
slice_op.getStrides());
auto zero = rewriter.getIntegerAttr(rewriter.getI32Type(), 0);
auto no_offset = rewriter.getBoolAttr(false);
rewriter.replaceOpWithNewOp<TFL::StridedSliceOp>(
slice_op, slice_op.getType(), slice_op.getOperand(),
BuildTFLCastOp(rewriter, begin), BuildTFLCastOp(rewriter, end),
BuildTFLCastOp(rewriter, strides), zero, zero, zero, zero, zero,
no_offset);
return success();
}
};
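// Prepare-phase pattern: rewrites mhlo.dynamic_slice so its start indices use
// a signless integer element type (via mhlo.convert), which the legalization
// pattern below requires.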
class CastSliceIndicesToSignless
: public OpRewritePattern<mhlo::DynamicSliceOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::DynamicSliceOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult CastSliceIndicesToSignless::matchAndRewrite(
mhlo::DynamicSliceOp op, PatternRewriter& rewriter) const {
auto start_type =
llvm::cast<ShapedType>(op.getStartIndices().front().getType());
auto start_e_type = start_type.getElementType();
if (start_e_type.isSignlessIntOrFloat()) {
return rewriter.notifyMatchFailure(op, "Already signless.");
}
auto new_start_e_type =
rewriter.getIntegerType(start_e_type.getIntOrFloatBitWidth());
llvm::SmallVector<Value> casted_start_inds;
for (auto start_ind_opr : op.getStartIndices()) {
auto casted_start_ind_opr = rewriter.create<mhlo::ConvertOp>(
start_ind_opr.getLoc(), start_ind_opr, new_start_e_type);
casted_start_inds.push_back(casted_start_ind_opr.getResult());
}
rewriter.replaceOpWithNewOp<mhlo::DynamicSliceOp>(
op, op.getOperand(), casted_start_inds, op.getSliceSizes());
return success();
}
bool IsDynamicSliceLegal(mhlo::DynamicSliceOp op) {
return !llvm::cast<ShapedType>(op.getOperand().getType()).hasStaticShape();
}
class LegalizeDynamicSliceOp
: public OpConversionPattern<mhlo::DynamicSliceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::DynamicSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
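// Lowers mhlo.dynamic_slice (statically shaped operand, signless start
// indices) to tfl.slice: each start index is clamped to
// [0, dim_size - slice_size] with tfl.maximum / tfl.minimum, the clamped
// indices are packed into one tensor, and the static slice sizes become a
// constant operand.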
LogicalResult LegalizeDynamicSliceOp::matchAndRewrite(
mhlo::DynamicSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto start_type =
llvm::cast<ShapedType>(op.getStartIndices().front().getType());
auto start_e_type = start_type.getElementType();
if (!start_e_type.isSignlessIntOrFloat()) {
return rewriter.notifyMatchFailure(
op, "Must be signless integer for start indices.");
}
auto input_type = llvm::cast<ShapedType>(op.getOperand().getType());
if (!input_type.hasStaticShape()) {
return rewriter.notifyMatchFailure(op, "Input must be statically shaped.");
}
Value clamp_left_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(), rewriter.getZeroAttr(start_type));
llvm::SmallVector<Value> new_start_indices;
const auto stride_sizes = UnrollI64Splat(op.getSliceSizes());
for (auto [dim_size, start_ind_opr, stride_size] :
llvm::zip(input_type.getShape(), op.getStartIndices(), stride_sizes)) {
const int64_t clamp_right_val = dim_size - stride_size;
auto clamp_right_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(),
DenseElementsAttr::get(start_type, rewriter.getIntegerAttr(
start_e_type, clamp_right_val)));
Value new_start_ind = rewriter.create<TFL::MaximumOp>(
op->getLoc(), start_type, clamp_left_cst, start_ind_opr);
new_start_ind = rewriter.create<TFL::MinimumOp>(
op->getLoc(), start_type, clamp_right_cst, new_start_ind);
new_start_indices.push_back(new_start_ind);
}
auto packed_indices = PackScalarIndices(new_start_indices, rewriter);
auto slice_sizes_cst =
rewriter.create<arith::ConstantOp>(op->getLoc(), op.getSliceSizes());
rewriter.replaceOpWithNewOp<TFL::SliceOp>(op, op.getType(), op.getOperand(),
packed_indices, slice_sizes_cst);
return success();
}
class LegalizeRealDynamicSliceOp
: public OpConversionPattern<mhlo::RealDynamicSliceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::RealDynamicSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeRealDynamicSliceOp::matchAndRewrite(
mhlo::RealDynamicSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto start_indices_type =
mlir::cast<RankedTensorType>(op.getStartIndices().getType());
auto end_indices_type =
mlir::cast<RankedTensorType>(op.getLimitIndices().getType());
if (start_indices_type.getNumDynamicDims() != 0 ||
end_indices_type.getNumDynamicDims() != 0) {
return rewriter.notifyMatchFailure(
op,
"Start indices and limit indices must not have dynamic dimensions.");
}
auto zero = rewriter.getIntegerAttr(rewriter.getI32Type(), 0);
auto no_offset = rewriter.getBoolAttr(false);
rewriter.replaceOpWithNewOp<TFL::StridedSliceOp>(
op, op.getType(), op.getOperand(),
BuildTFLCastOp(rewriter, op.getStartIndices()),
BuildTFLCastOp(rewriter, op.getLimitIndices()),
BuildTFLCastOp(rewriter, op.getStrides()), zero, zero, zero, zero, zero,
no_offset);
return success();
};
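// mhlo.dynamic_update_slice maps directly onto tfl.dynamic_update_slice; the
// only work is packing the scalar start indices into a single tensor.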
class LegalizeDynamicUpdateSliceOp
: public OpConversionPattern<mhlo::DynamicUpdateSliceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::DynamicUpdateSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeDynamicUpdateSliceOp::matchAndRewrite(
mhlo::DynamicUpdateSliceOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto packed_indices = PackScalarIndices(op.getStartIndices(), rewriter);
rewriter.replaceOpWithNewOp<TFL::DynamicUpdateSliceOp>(
op, op.getType(), op.getOperand(), op.getUpdate(), packed_indices);
return success();
};
}
void PopulateLegalizeSlicePatterns(MLIRContext* ctx,
RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeSliceOp, LegalizeDynamicSliceOp,
LegalizeDynamicUpdateSliceOp, LegalizeRealDynamicSliceOp>(ctx);
target.addIllegalOp<mhlo::SliceOp, mhlo::DynamicUpdateSliceOp,
mhlo::RealDynamicSliceOp>();
target.addDynamicallyLegalOp<mhlo::DynamicSliceOp>(IsDynamicSliceLegal);
}
void PopulatePrepareSlicePatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<CastSliceIndicesToSignless>(ctx);
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/slice_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Slice, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const std::vector<int32_t> input_shape = {shape_rng()};
const auto offsets = RandomOffsets(rng, input_shape);
const auto sizes = RandomSizes(rng, input_shape, offsets);
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.UseInt64OffsetsAndSize(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Slice, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const std::vector<int32_t> input_shape = {shape_rng(), shape_rng()};
const auto offsets = RandomOffsets(rng, input_shape);
const auto sizes = RandomSizes(rng, input_shape, offsets);
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.UseInt64OffsetsAndSize(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Slice, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const std::vector<int32_t> input_shape = {shape_rng(), shape_rng(),
shape_rng()};
const auto offsets = RandomOffsets(rng, input_shape);
const auto sizes = RandomSizes(rng, input_shape, offsets);
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.UseInt64OffsetsAndSize(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Slice, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const std::vector<int32_t> input_shape = {shape_rng(), shape_rng(),
shape_rng(), shape_rng()};
const auto offsets = RandomOffsets(rng, input_shape);
const auto sizes = RandomSizes(rng, input_shape, offsets);
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.UseInt64OffsetsAndSize(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Slice, 5D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const std::vector<int32_t> input_shape = {
shape_rng(), shape_rng(), shape_rng(), shape_rng(), shape_rng()};
const auto offsets = RandomOffsets(rng, input_shape);
const auto sizes = RandomSizes(rng, input_shape, offsets);
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
SliceTester()
.InputShape(input_shape)
.Offsets(offsets)
.Sizes(sizes)
.UseInt64OffsetsAndSize(true)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
81478791-4647-44ad-a8fe-a105c9dcf9ae | cpp | tensorflow/tensorflow | heap_simulator | third_party/xla/xla/service/heap_simulator/heap_simulator.cc | third_party/xla/xla/service/heap_simulator/heap_simulator_test.cc | #include "xla/service/heap_simulator/heap_simulator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/map_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/time_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
constexpr int64_t kMaxMemoryMapDimensionSize = 100;
struct AsciiMemoryMapParameters {
int64_t memory_block_size = 1;
int64_t end_of_last_occupied_chunk = -1;
};
AsciiMemoryMapParameters GetAsciiMemoryMapParameters(
std::vector<const BufferIntervalTreeNode*>& nodes) {
CHECK(!nodes.empty());
int64_t min_chunk_offset = std::numeric_limits<int64_t>::max();
int64_t end_of_last_occupied_chunk = -1;
int64_t memory_block_size = nodes.front()->chunk.offset;
for (const BufferIntervalTreeNode* node : nodes) {
min_chunk_offset = std::min(min_chunk_offset, node->chunk.offset);
end_of_last_occupied_chunk =
std::max(end_of_last_occupied_chunk, node->chunk.chunk_end());
memory_block_size = std::gcd(memory_block_size, node->chunk.offset);
memory_block_size = std::gcd(memory_block_size, node->chunk.chunk_end());
}
VLOG(3) << " min_chunk_offset: " << min_chunk_offset
<< " end_of_last_occupied_chunk: " << end_of_last_occupied_chunk
<< " memory_block_size: " << memory_block_size;
return {memory_block_size, end_of_last_occupied_chunk};
}
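// Builds a 2D occupancy grid: rows are memory blocks of memory_block_size
// bytes, columns are time steps in [start, end]; a cell is true when some
// chunk covers that block at that time.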
std::vector<std::vector<bool>> GetMemoryMap(
int64_t start, int64_t end, int64_t memory_block_size,
int64_t num_memory_blocks,
std::vector<const BufferIntervalTreeNode*>& nodes) {
int64_t total_time = end - start + 1;
std::vector<std::vector<bool>> memory_map(
num_memory_blocks, std::vector<bool>(total_time, false));
for (const BufferIntervalTreeNode* node : nodes) {
for (int64_t i = node->chunk.offset / memory_block_size;
i < node->chunk.chunk_end() / memory_block_size; ++i) {
for (int64_t j = std::max(node->start - start, int64_t{0});
j <= std::min(node->end - start, end - start); ++j) {
memory_map[i][j] = true;
}
}
}
return memory_map;
}
std::string BufferIntervalTreeNodesToString(
absl::Span<const BufferIntervalTreeNode* const> nodes) {
std::string output;
for (const BufferIntervalTreeNode* node : nodes) {
absl::StrAppend(&output, node->ToString(), "\n");
}
return output;
}
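// Renders the occupancy grid as ASCII art: one row per memory block with the
// highest addresses on top, '#' for occupied and '.' for free cells, the
// block's upper address at the end of each row, and the last digit of each
// time step along the bottom. Every group_size columns an extra space is
// inserted for readability.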
std::string MemoryMapToString(int64_t start, int64_t end,
int64_t memory_block_size, int64_t group_size,
std::vector<std::vector<bool>>& memory_map) {
int64_t num_memory_blocks = memory_map.size();
int64_t total_time = memory_map.front().size();
std::string output = "\n";
absl::StrAppend(&output, "Memory map for time: [", start, ",", end,
"], memory_block_size: ", memory_block_size,
", group_size: ", group_size, "\n\n");
for (int64_t i = num_memory_blocks - 1; i >= 0; --i) {
for (int64_t j = 0; j < total_time; ++j) {
if (group_size && j % group_size == 0) {
absl::StrAppend(&output, " ");
}
absl::StrAppend(&output, memory_map[i][j] ? "#" : ".");
}
absl::StrAppend(&output, " ", std::to_string((i + 1) * memory_block_size),
"\n");
}
for (int64_t j = start; j <= end; ++j) {
if (group_size && j % group_size == 0) {
absl::StrAppend(&output, " ");
}
absl::StrAppend(&output, std::to_string(j % 10));
}
absl::StrAppend(&output, "\n\n");
return output;
}
}
using absl::flat_hash_map;
using absl::flat_hash_set;
bool IsOdd(int x) { return (x % 2) == 1; }
bool IsEven(int x) { return (x % 2) == 0; }
HeapSimulator::Chunk HeapSimulator::Chunk::FromOffsetEnd(int64_t offset,
int64_t end) {
return FromOffsetSize(offset, end - offset);
}
HeapSimulator::Chunk HeapSimulator::Chunk::FromOffsetSize(int64_t offset,
int64_t size) {
return Chunk(offset, size);
}
std::string HeapSimulator::Chunk::ToString() const {
return absl::StrCat("[", offset, ",", chunk_end(), ")");
}
std::string BufferIntervalTreeNode::ToString() const {
return absl::StrCat("start: ", start, " end: ", end,
" chunk: ", chunk.ToString());
}
bool HeapSimulator::Chunk::OverlapsWith(Chunk other_chunk) const {
CHECK_NE(size, 0);
CHECK_NE(other_chunk.size, 0);
return offset < other_chunk.chunk_end() && other_chunk.offset < chunk_end();
}
std::ostream& operator<<(std::ostream& stream,
const HeapSimulator::Chunk& chunk) {
stream << chunk.ToString();
return stream;
}
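// Lower-bound helpers: run the simulator with NoFragmentationStatsHeap, which
// only tracks peak live bytes, so the result ignores fragmentation and
// alignment. The MinimumMemoryForComputation overloads below do the same for
// a single computation.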
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForModule(
const HloSchedule& schedule,
const LogicalBuffer::SizeFunction& size_function) {
if (schedule.empty()) {
return 0;
}
const HloModule* module = schedule.module();
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
*module, schedule, *alias_analysis, size_function));
return result.heap_size;
}
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function) {
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
computation, sequence, alias_analysis, size_function,
HeapSimulator::Options()));
return result.heap_size;
}
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const HloSchedule* schedule) {
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
computation, sequence, alias_analysis, size_function,
schedule, HeapSimulator::Options()));
return result.heap_size;
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm, const HloModule& module,
const HloSchedule& schedule, const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options, &schedule);
const HloComputation* entry_computation = module.entry_computation();
const HloInstructionSequence& instruction_sequence =
schedule.sequence(entry_computation);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, alias_analysis, entry_computation));
TF_RETURN_IF_ERROR(heap.RunComputation(*entry_computation,
instruction_sequence, alias_analysis,
hlo_live_range.get()));
return heap.Finish();
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options,
                     /*schedule=*/nullptr);
HloSchedule schedule(computation.parent());
schedule.set_sequence(&computation, instruction_sequence);
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, alias_analysis, &computation,
                                        /*module_scoped_analysis=*/false));
TF_RETURN_IF_ERROR(heap.RunComputation(computation, instruction_sequence,
alias_analysis, hlo_live_range.get()));
return heap.Finish();
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const HloSchedule* schedule,
const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options,
schedule);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(*schedule, alias_analysis, &computation));
TF_RETURN_IF_ERROR(heap.RunComputation(computation, instruction_sequence,
alias_analysis, hlo_live_range.get()));
return heap.Finish();
}
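// Core simulation loop shared by the Run() overloads above: computes buffer
// live ranges, sorts the values by (start, end, id), then walks the schedule
// one time step at a time, Alloc-ing values defined at that step (or sharing
// an operand's buffer when dataflow analysis allows it) and Free-ing values
// whose live range ends there.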
absl::Status HeapSimulator::RunComputation(
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis, HloLiveRange* hlo_live_range) {
XLA_VLOG_LINES(1, computation.parent()->ToString());
XLA_VLOG_LINES(2, computation.ToString());
VLOG(1) << hlo_live_range->ToString();
HloDataflowAnalysis& dataflow_analysis = alias_analysis.dataflow_analysis();
std::vector<std::vector<const HloValue*>> buffers_defined(
hlo_live_range->schedule_end_time() + 1);
std::vector<std::vector<const HloValue*>> buffers_freed(
hlo_live_range->schedule_end_time() + 1);
std::vector<const HloValue*> values_to_assign;
values_to_assign.reserve(dataflow_analysis.values().size());
auto& buffer_live_ranges = hlo_live_range->buffer_live_ranges();
for (const HloValue* value : dataflow_analysis.values()) {
if (!buffer_live_ranges.contains(value)) {
continue;
}
if (IgnoreBuffer(value)) {
continue;
}
values_to_assign.push_back(value);
}
absl::c_sort(values_to_assign,
[&](const HloValue* value1, const HloValue* value2) {
const auto& live_range1 = buffer_live_ranges.at(value1);
const auto& live_range2 = buffer_live_ranges.at(value2);
return std::forward_as_tuple(live_range1.start,
live_range1.end, value1->id()) <
std::forward_as_tuple(live_range2.start,
live_range2.end, value2->id());
});
for (const HloValue* value : values_to_assign) {
auto live_range = buffer_live_ranges.at(value);
buffers_defined[live_range.start].push_back(value);
buffers_freed[live_range.end].push_back(value);
}
absl::flat_hash_map<const HloBuffer*, const HloValue*> first_allocated_value;
VLOG(1) << "Program time" << hlo_live_range->schedule_end_time();
for (const HloBuffer& buffer : alias_analysis.buffers()) {
int64_t size = 0;
for (const HloValue* value : buffer.values()) {
size = std::max(size, size_fn_(*value));
}
for (const HloValue* value : buffer.values()) {
buffer_sizes_[value] = size;
}
}
for (int64_t i = 0; i < hlo_live_range->schedule_end_time() + 1; ++i) {
VLOG(1) << "Time step: " << i;
for (const HloValue* value : buffers_defined[i]) {
bool shared = false;
VLOG(1) << "Start buffer: " << value->ToShortString();
const HloBuffer* hlo_buffer =
&alias_analysis.GetBufferContainingValue(*value);
if (first_allocated_value.count(hlo_buffer) != 0) {
ShareBuffer(value, first_allocated_value[hlo_buffer],
value->instruction());
VLOG(1) << " ShareWith"
<< first_allocated_value[hlo_buffer]->ToShortString();
continue;
}
if (options_.may_reuse_operand_buffers &&
hlo_buffer->values().size() == 1) {
for (const HloInstruction* operand : value->instruction()->operands()) {
const HloValueSet operand_value_set =
dataflow_analysis.GetValueSet(operand);
for (const HloValue* operand_value : operand_value_set.values()) {
const HloBuffer* operand_buffer =
&alias_analysis.GetBufferContainingValue(*operand_value);
if (operand_buffer->values().size() > 1) {
continue;
}
auto it = buffer_live_ranges.find(operand_value);
if (it == buffer_live_ranges.end()) {
continue;
}
auto& operand_live_range = it->second;
auto& user_live_range = buffer_live_ranges[value];
if (operand_live_range.end != i) {
continue;
}
if (IgnoreBuffer(operand_value)) {
continue;
}
if (!absl::c_linear_search(buffers_freed[i], operand_value)) {
continue;
}
if (value->instruction()->IsUserOf(operand_value->instruction()) &&
value->instruction()->opcode() != HloOpcode::kCopy &&
dataflow_analysis.CanShareOperandBufferWithUser(
operand_value->instruction(), operand_value->index(),
value->instruction(), value->index())) {
Free(operand_value, operand_value->instruction());
buffers_freed[i].erase(
std::remove(buffers_freed[i].begin(), buffers_freed[i].end(),
operand_value),
buffers_freed[i].end());
ShareBuffer(value, operand_value, value->instruction());
operand_live_range.end = user_live_range.end;
VLOG(1) << "Sharing " << value->ToShortString() << " with "
<< operand_value->ToShortString()
<< ", size:" << size_fn_(*value);
shared = true;
break;
}
}
if (shared) {
break;
}
}
}
if (!shared) {
Alloc(value, value->instruction());
first_allocated_value[hlo_buffer] = value;
}
}
if (!buffers_freed[i].empty()) {
VLOG(1) << "Free Buffer: ";
}
for (const HloValue* value : buffers_freed[i]) {
VLOG(1) << " " << value->ToShortString();
Free(value, value->instruction());
}
}
return absl::OkStatus();
}
HeapSimulator::HeapSimulator(std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const BufferValue::SizeFunction& size_fn,
const Options& options,
const HloSchedule* schedule)
: no_fragmentation_stats_(
std::make_unique<NoFragmentationStatsHeap<HloValue>>()),
algorithm_(std::move(algorithm)),
size_fn_(size_fn),
options_(options),
schedule_(schedule) {
debug_trace_.set_whole_module_simulation(schedule_ != nullptr);
}
HeapSimulator::~HeapSimulator() {}
bool HeapSimulator::IgnoreBuffer(const HloValue* buffer) const {
if (!options_.alloc_constants &&
buffer->instruction()->opcode() == HloOpcode::kConstant) {
return true;
}
return options_.buffers_to_assign != nullptr &&
!options_.buffers_to_assign->contains(buffer);
}
void HeapSimulator::Alloc(const HloValue* buffer,
const HloInstruction* instruction) {
CHECK(!allocated_buffers_.contains(buffer))
<< "Alloc called on allocated buffer: " << *buffer;
CHECK(!freed_buffers_.contains(buffer))
<< "Alloc called on freed buffer: " << *buffer;
allocated_buffers_.insert(buffer);
const int64_t size = GetBufferSize(buffer);
algorithm_->Alloc(buffer, size);
no_fragmentation_stats_->Alloc(buffer, size);
FillDebugTrace(HeapSimulatorTrace::Event::ALLOC, buffer, instruction,
nullptr);
}
void HeapSimulator::Free(const HloValue* buffer,
const HloInstruction* instruction) {
const int64_t size = GetBufferSize(buffer);
algorithm_->Free(buffer, size);
no_fragmentation_stats_->Free(buffer, size);
FillDebugTrace(HeapSimulatorTrace::Event::FREE, buffer, instruction, nullptr);
}
void HeapSimulator::ShareBuffer(const HloValue* buffer, const HloValue* shared,
const HloInstruction* instruction) {
algorithm_->ShareWith(buffer, shared, GetBufferSize(shared));
no_fragmentation_stats_->ShareWith(buffer, shared, GetBufferSize(shared));
FillDebugTrace(HeapSimulatorTrace::Event::SHARE_WITH, buffer, instruction,
shared);
}
int64_t HeapSimulator::GetBufferSize(const HloValue* buffer) const {
auto it = buffer_sizes_.find(buffer);
CHECK(it != buffer_sizes_.end());
return it->second;
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Finish() {
TF_ASSIGN_OR_RETURN(Result<HloValue> result, algorithm_->Finish());
size_t total_chunk_count = absl::c_accumulate(
result.heap_results, static_cast<size_t>(0),
[&](size_t lhs, const HeapResult<HloValue>& rhs) -> size_t {
return lhs + rhs.chunk_map.size();
});
if (total_chunk_count != 0) {
if (options_.buffers_to_assign != nullptr) {
CHECK_EQ(options_.buffers_to_assign->size(), total_chunk_count);
}
}
TF_ASSIGN_OR_RETURN(const Result<HloValue> no_frag_result,
no_fragmentation_stats_->Finish());
result.fragmentation_size = result.heap_size - no_frag_result.heap_size;
result.debug_trace.Swap(&debug_trace_);
return result;
}
void HeapSimulator::FillDebugTrace(HeapSimulatorTrace::Event::Kind kind,
const HloValue* buffer,
const HloInstruction* instruction,
const HloValue* share_with_canonical) {
HeapSimulatorTrace::Event* event = debug_trace_.add_events();
event->set_kind(kind);
event->set_buffer_id(buffer->id());
*event->mutable_computation_name() =
std::string(instruction->parent()->name());
*event->mutable_instruction_name() = std::string(instruction->name());
if (kind == HeapSimulatorTrace::Event::SHARE_WITH) {
CHECK(share_with_canonical != nullptr);
event->set_share_with_canonical_id(share_with_canonical->id());
} else {
CHECK(share_with_canonical == nullptr);
}
}
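// NoFragmentationStatsHeap only tracks the running total of live bytes and
// its maximum; it assigns no offsets. HeapSimulator::Finish() subtracts its
// result from the real algorithm's heap size to report fragmentation.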
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::Alloc(const BufferType* buffer,
int64_t size) {
current_heap_size_ += size;
if (current_heap_size_ > max_heap_size_) {
max_heap_size_ = current_heap_size_;
}
}
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::AccountForSubcomputationMemory(
const HloInstruction* instruction, int64_t alloc_size_by_instruction) {
int64_t max_subcomputation_bytes = 0;
if (max_subcomputation_bytes > 0 &&
(instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional)) {
max_subcomputation_bytes -= alloc_size_by_instruction;
}
max_heap_size_ =
std::max(max_heap_size_, current_heap_size_ + max_subcomputation_bytes);
}
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::Free(const BufferType* buffer,
int64_t size) {
current_heap_size_ -= size;
}
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
NoFragmentationStatsHeap<BufferType>::Finish() {
Result result;
result.heap_size = max_heap_size_;
return result;
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::GlobalDecreasingSizeBestFitHeap(
int64_t alignment, Type type, BufferIntervalCompare buffer_interval_compare,
SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type)
: alignment_(alignment),
slice_time_permutation_iteration_type_(
slice_time_permutation_iterator_type) {
if (type == kTemporal) {
buffer_interval_compare_ = GetTemporalBufferIntervalCompare();
CHECK(buffer_interval_compare == nullptr);
} else if (type == kSpatial) {
buffer_interval_compare_ = GetSpatialBufferIntervalCompare();
CHECK(buffer_interval_compare == nullptr);
} else {
CHECK(type == kCustom);
CHECK(buffer_interval_compare != nullptr);
buffer_interval_compare_ = buffer_interval_compare;
}
}
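// Temporal ordering: the key (start - end) is most negative for the longest
// live ranges (extended over transitive colocations), so long-lived buffers
// are placed first; ties go to the larger buffer, then to the buffer itself
// for determinism.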
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferIntervalCompare
GlobalDecreasingSizeBestFitHeap<BufferType>::GetTemporalBufferIntervalCompare()
const {
return LessThanByKey([this](const BufferInterval& x) {
int64_t x_end = x.end;
for (auto colocation : GetTransitiveColocations(x)) {
x_end = std::max(x_end, buffer_intervals_.at(colocation).end);
}
return std::make_tuple(x.start - x_end, -x.size, std::cref(*x.buffer));
});
}
template <typename BufferType>
SliceTimePermutationIterator::Ty GlobalDecreasingSizeBestFitHeap<
BufferType>::slice_time_permutation_iterator_type() const {
return slice_time_permutation_iteration_type_;
}
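// Spatial ordering: largest buffers first, then longer live ranges, then the
// buffer itself for determinism.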
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::BufferIntervalCompare
GlobalDecreasingSizeBestFitHeap<BufferType>::GetSpatialBufferIntervalCompare() {
return LessThanByKey([](const BufferInterval& x) {
return std::make_tuple(-x.size, x.start - x.end, std::cref(*x.buffer));
});
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::Alloc(
const BufferType* buffer, int64_t size) {
if (size == 0) {
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(0, 0));
return;
}
auto emplace_result = buffer_intervals_.emplace(
buffer, BufferInterval{buffer, size, current_time_, -1, {}, true});
CHECK(emplace_result.second);
++current_time_;
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::ShareWith(
const BufferType* buffer, const BufferType* share_with, int64_t size) {
if (size == 0) {
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(0, 0));
return;
}
CHECK_NE(buffer_intervals_.count(share_with), 0);
buffer_intervals_[share_with].colocations.push_back(buffer);
auto emplace_result = buffer_intervals_.emplace(
buffer, BufferInterval{buffer, size, current_time_, -1, {}, false});
CHECK(emplace_result.second);
++current_time_;
}
template <typename BufferType>
absl::flat_hash_set<const BufferType*>
GlobalDecreasingSizeBestFitHeap<BufferType>::GetTransitiveColocations(
const BufferInterval& interval) const {
absl::flat_hash_set<const BufferType*> result;
std::vector<const BufferInterval*> worklist = {&interval};
while (!worklist.empty()) {
const BufferInterval* item = worklist.back();
worklist.pop_back();
for (const BufferType* buffer_colocated : item->colocations) {
if (result.insert(buffer_colocated).second) {
worklist.push_back(&buffer_intervals_.at(buffer_colocated));
}
}
}
return result;
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::Free(const BufferType* buffer,
int64_t size) {
if (size == 0) {
return;
}
BufferInterval& buffer_interval = FindOrDie(buffer_intervals_, buffer);
CHECK_EQ(buffer_interval.buffer, buffer);
CHECK_EQ(buffer_interval.size, size);
CHECK_EQ(buffer_interval.end, -1);
if (buffer_interval.end != -1) {
return;
}
buffer_interval.end = current_time_;
++current_time_;
}
using Chunk = HeapSimulator::Chunk;
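// Inserts a [start, end] interval and its chunk into the (unbalanced) binary
// search tree keyed on start time. Each node's subtree_end (the maximum end
// time anywhere in its subtree) is updated on the way down so that queries
// can prune whole subtrees.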
void BufferIntervalTree::Add(int64_t start, int64_t end, const Chunk& chunk) {
node_storage_.emplace_back(BufferIntervalTreeNode{
      start, end, /*subtree_end=*/end, chunk,
      /*left=*/nullptr, /*right=*/nullptr, /*parent=*/nullptr});
if (root_ == nullptr) {
root_ = &node_storage_.back();
return;
}
BufferIntervalTreeNode* parent = root_;
while (true) {
parent->subtree_end = std::max(parent->subtree_end, end);
if (parent->start > start) {
if (parent->left == nullptr) {
parent->left = &node_storage_.back();
node_storage_.back().parent = parent;
return;
}
parent = parent->left;
} else {
if (parent->right == nullptr) {
parent->right = &node_storage_.back();
node_storage_.back().parent = parent;
return;
}
parent = parent->right;
}
}
}
bool BufferIntervalTree::Remove(int64_t start, int64_t end,
const Chunk& chunk) {
BufferIntervalTreeNode* to_delete = root_;
while (to_delete != nullptr) {
if (to_delete->start == start && to_delete->end == end &&
to_delete->chunk.offset == chunk.offset) {
break;
}
if (start < to_delete->start) {
to_delete = to_delete->left;
} else {
to_delete = to_delete->right;
}
}
if (to_delete == nullptr) {
return false;
}
std::function<void(BufferIntervalTreeNode*)> fix_up =
[&](BufferIntervalTreeNode* node) {
if (node == nullptr) {
return;
}
node->subtree_end = node->end;
if (node->left) {
node->subtree_end =
std::max(node->subtree_end, node->left->subtree_end);
}
if (node->right) {
node->subtree_end =
std::max(node->subtree_end, node->right->subtree_end);
}
fix_up(node->parent);
};
if (to_delete->right == nullptr) {
if (root_ == to_delete) {
root_ = to_delete->left;
return true;
}
if (to_delete == to_delete->parent->left) {
to_delete->parent->left = to_delete->left;
}
if (to_delete == to_delete->parent->right) {
to_delete->parent->right = to_delete->left;
}
if (to_delete->left) {
to_delete->left->parent = to_delete->parent;
}
fix_up(to_delete);
} else {
BufferIntervalTreeNode* to_promote = to_delete->right;
while (to_promote->left != nullptr) {
to_promote = to_promote->left;
}
to_delete->start = to_promote->start;
to_delete->end = to_promote->end;
to_delete->subtree_end = to_promote->subtree_end;
to_delete->chunk = to_promote->chunk;
auto to_promote_parent = to_promote->parent;
if (to_promote_parent->left == to_promote) {
to_promote_parent->left = to_promote->right;
} else {
to_promote_parent->right = to_promote->right;
}
if (to_promote->right) {
to_promote->right->parent = to_promote_parent;
}
fix_up(to_promote_parent);
}
return true;
}
std::vector<Chunk> BufferIntervalTree::ChunksOverlappingInTime(
int64_t start, int64_t end) const {
std::vector<Chunk> result;
for (const BufferIntervalTreeNode* node :
NodesOverlappingInTime(start, end)) {
result.push_back(node->chunk);
}
return result;
}
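// Collects every node whose [start, end] interval intersects the query
// interval. The explicit-stack walk skips a subtree when its subtree_end is
// before the query start, and skips the right child when the node's start is
// already past the query end.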
std::vector<const BufferIntervalTreeNode*>
BufferIntervalTree::NodesOverlappingInTime(int64_t start, int64_t end) const {
std::vector<const BufferIntervalTreeNode*> result;
if (root_ == nullptr) {
return result;
}
std::vector<const BufferIntervalTreeNode*> visiting_stack;
visiting_stack.push_back(root_);
while (!visiting_stack.empty()) {
const BufferIntervalTreeNode* top = visiting_stack.back();
visiting_stack.pop_back();
if (start > top->subtree_end) {
continue;
}
if (top->left != nullptr) {
visiting_stack.push_back(top->left);
}
if (top->start <= end && top->end >= start) {
result.push_back(top);
}
if (end < top->start) {
continue;
}
if (top->right != nullptr) {
visiting_stack.push_back(top->right);
}
}
return result;
}
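// Renders the allocations overlapping [start, end] as an ASCII memory map
// (time vs. memory blocks). Falls back to a plain listing of the nodes when
// either dimension would exceed kMaxMemoryMapDimensionSize.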
std::string BufferIntervalTree::NodesOverlappingInTimeToAsciiArt(
int64_t start, int64_t end, int64_t group_size) const {
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
if (nodes.empty()) {
return "No nodes overlapping in time. Memory is free!";
}
auto [memory_block_size, end_of_last_occupied_chunk] =
GetAsciiMemoryMapParameters(nodes);
CHECK_GE(end_of_last_occupied_chunk, 0);
CHECK_NE(memory_block_size, 0);
int64_t total_time = end - start + 1;
int64_t num_memory_blocks = end_of_last_occupied_chunk / memory_block_size;
if (total_time > kMaxMemoryMapDimensionSize ||
num_memory_blocks > kMaxMemoryMapDimensionSize) {
std::string output;
absl::StrAppend(
&output,
"\nCannot print memory usage to ASCII art. Printing nodes instead!\n\n",
BufferIntervalTreeNodesToString(nodes));
return output;
}
std::vector<std::vector<bool>> memory_map =
GetMemoryMap(start, end, memory_block_size, num_memory_blocks, nodes);
return MemoryMapToString(start, end, memory_block_size, group_size,
memory_map);
}
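// Returns, for each time step in [start, end], the total size of the chunks
// live at that step. For example, a single chunk of size 16 live over [2, 3]
// yields {16, 16} for MemoryUsedInInterval(2, 3).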
std::vector<int64_t> BufferIntervalTree::MemoryUsedInInterval(
int64_t start, int64_t end) const {
int64_t total_time = end - start + 1;
CHECK_GE(total_time, 0);
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
std::vector<int64_t> memory_used_in_interval(total_time, 0);
for (const BufferIntervalTreeNode* node : nodes) {
int64_t node_start = std::max(node->start, start);
int64_t node_end = std::min(node->end, end);
for (int64_t time = node_start; time <= node_end; ++time) {
memory_used_in_interval[time - start] += node->chunk.size;
}
}
return memory_used_in_interval;
}
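// Returns the highest chunk end offset among allocations live anywhere in
// [start, end], i.e. the heap size required over that interval.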
int64_t BufferIntervalTree::HeapSizeInInterval(const int64_t start,
const int64_t end) const {
CHECK_LE(start, end);
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
int64_t max_memory_used = 0;
for (const BufferIntervalTreeNode* node : nodes) {
max_memory_used = std::max(max_memory_used, node->chunk.chunk_end());
}
return max_memory_used;
}
template <typename BufferType>
std::string
GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval::ToString() const {
return absl::StrCat("{ ",
"buffer: {", (buffer ? buffer->ToString() : "null"),
"}, ",
"size: ", size, ", ",
"start: ", start, ", ",
"end: ", end, ", ",
"num_colocations: ", colocations.size(), ", ",
"need_allocation: ", need_allocation,
" }");
}
template <typename BufferType>
const
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
CreateConstInterval(const BufferInterval& full_buffer_interval) {
return SlicedBufferInterval(full_buffer_interval);
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
CreateMutableInterval(BufferInterval& full_buffer_interval) {
return SlicedBufferInterval(full_buffer_interval, &full_buffer_interval);
}
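// Records the slice sizes (sorted by offset) and builds one BufferInterval per
// slice for use by MakeFreeChunks. All but the last interval conservatively
// use the smallest slice size; the last carries the full buffer size and the
// colocations.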
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::Slice(
absl::Span<const int64_t> slice_sizes_sorted_by_offset) {
if (slice_sizes_sorted_by_offset.empty()) {
slice_sizes_sorted_by_offset_ = {full_buffer_interval_.size};
make_free_chunks_intervals_ = {full_buffer_interval_};
return;
}
const int64_t min_slice_size =
*absl::c_min_element(slice_sizes_sorted_by_offset);
slice_sizes_sorted_by_offset_ = std::vector<int64_t>(
slice_sizes_sorted_by_offset.begin(), slice_sizes_sorted_by_offset.end());
size_t num_slices = slice_sizes_sorted_by_offset.size();
make_free_chunks_intervals_.clear();
make_free_chunks_intervals_.reserve(num_slices);
int64_t size_total = 0;
absl::InlinedVector<const BufferType*, 2> empty_colocations;
for (int i = 0; i < num_slices; ++i) {
int64_t new_size = slice_sizes_sorted_by_offset[i];
size_total += new_size;
make_free_chunks_intervals_.push_back(BufferInterval{
full_buffer_interval_.buffer,
(i == num_slices - 1 ? full_buffer_interval_.size : min_slice_size),
0,
full_buffer_interval_.end,
(i == num_slices - 1 ? full_buffer_interval_.colocations
: empty_colocations),
full_buffer_interval_.need_allocation});
}
CHECK_EQ(size_total, full_buffer_interval_.size)
<< " slice sizes: {" << absl::StrJoin(slice_sizes_sorted_by_offset, ", ")
<< "};";
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
UpdateExclusiveSliceStartTimes(
const std::vector<int64_t>& exclusive_start_times) {
std::vector<int64_t> inclusive_start_times = exclusive_start_times;
absl::c_for_each(inclusive_start_times,
[](int64_t& t) { t = ExclusiveToInclusiveStartTime(t); });
UpdateInclusiveSliceStartTimes(inclusive_start_times);
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
UpdateInclusiveSliceStartTimes(
const std::vector<int64_t>& inclusive_start_times) {
CHECK_EQ(inclusive_start_times.size(), num_slices());
CHECK(mutable_full_buffer_interval_ != nullptr);
mutable_full_buffer_interval_->start = inclusive_start_times.front();
for (size_t slice_time = 0; slice_time < num_slices(); ++slice_time) {
make_free_chunks_intervals_[slice_time].start =
inclusive_start_times[slice_time];
if (slice_time != num_slices() - 1) {
make_free_chunks_intervals_[slice_time].end =
ExclusiveToInclusiveEndTime(inclusive_start_times[slice_time + 1]);
} else {
make_free_chunks_intervals_[slice_time].end = full_buffer_interval_.end;
}
}
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::UpdateEndTime(int64_t end_time) {
CHECK(mutable_full_buffer_interval_ != nullptr);
mutable_full_buffer_interval_->end = end_time;
make_free_chunks_intervals_.back().end = end_time;
}
template <typename BufferType>
const typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::full_buffer_interval() const {
return full_buffer_interval_;
}
template <typename BufferType>
const std::vector<int64_t>& GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::SliceSizesSortedByOffset() const {
return slice_sizes_sorted_by_offset_;
}
template <typename BufferType>
std::vector<int64_t> GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::inclusive_start_times() const {
std::vector<int64_t> inclusive_start_times;
inclusive_start_times.reserve(num_slices());
for (const BufferInterval& buffer_interval : make_free_chunks_intervals_) {
inclusive_start_times.push_back(buffer_interval.start);
}
return inclusive_start_times;
}
template <typename BufferType>
const typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
IntervalForMakeFreeChunks(int64_t slice_time) const {
CHECK_LT(slice_time, num_slices());
return make_free_chunks_intervals_[slice_time];
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
SlicedBufferInterval(const BufferInterval& full_buffer_interval,
BufferInterval* mutable_full_buffer_interval)
: full_buffer_interval_(full_buffer_interval),
mutable_full_buffer_interval_(mutable_full_buffer_interval) {
Slice({});
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::ToString() const {
return absl::StrCat(
"{ full_buffer_interval: ", full_buffer_interval_.ToString(), ", ",
"MakeFreeChunks intervals: { ",
absl::StrJoin(make_free_chunks_intervals_, ", ",
[](std::string* out, const BufferInterval& interval) {
absl::StrAppend(out, interval.ToString());
}),
" }, ", "slize_sizes_sorted_by_offsets: { ",
absl::StrJoin(slice_sizes_sorted_by_offset_, ", "), " } }");
}
namespace {
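// Validates slice-time permutations during repacking: a permutation is allowed
// only if it preserves the original multiset of (slice size, inclusive start
// time) pairs, so the repacked slices have a schedule equivalent to the
// original one.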
class SliceTimePermutationValidator {
public:
explicit SliceTimePermutationValidator(
const SlicedAllocationData* original_slices)
: original_num_slices_(original_slices ? original_slices->num_slices()
: 0) {
if (original_num_slices_ <= 0) {
return;
}
slice_time_to_inclusive_schedule_time_ =
original_slices->SortedInclusiveStartTimes();
absl::c_sort(slice_time_to_inclusive_schedule_time_);
original_slice_sizes_and_start_times_pairwise_sorted_.reserve(
original_num_slices_);
for (const AllocatedSlice& slice :
original_slices->slices_sorted_by_offset) {
original_slice_sizes_and_start_times_pairwise_sorted_.push_back(
std::make_pair(slice.size, slice.inclusive_start_time));
}
absl::c_sort(original_slice_sizes_and_start_times_pairwise_sorted_);
sizes_sorted_by_offset_ = original_slices->SizesSortedByOffset();
}
bool IsValid(absl::Span<const int64_t> permutation) {
if (original_num_slices_ <= 0) {
return true;
}
std::vector<std::pair<int64_t, int64_t>>
proposed_slice_sizes_and_start_times_pairwise_sorted;
proposed_slice_sizes_and_start_times_pairwise_sorted.reserve(
original_num_slices_);
CHECK_EQ(sizes_sorted_by_offset_.size(), original_num_slices_);
CHECK_EQ(permutation.size(), original_num_slices_);
for (int i = 0; i < original_num_slices_; ++i) {
proposed_slice_sizes_and_start_times_pairwise_sorted.push_back(
std::make_pair(
sizes_sorted_by_offset_[i],
slice_time_to_inclusive_schedule_time_[permutation[i]]));
}
absl::c_sort(proposed_slice_sizes_and_start_times_pairwise_sorted);
bool allowed = (original_slice_sizes_and_start_times_pairwise_sorted_ ==
proposed_slice_sizes_and_start_times_pairwise_sorted);
VLOG(3) << [&]() {
auto export_pair = [](std::string* out,
const std::pair<int64_t, int64_t>& p) {
absl::StrAppend(out, "<", p.first, ", ", p.second, ">");
};
return absl::StrCat(
"Slice permutation ", (allowed ? "allowed" : "disallowed"),
". Original slice <size, start_time> mapping: ",
absl::StrJoin(original_slice_sizes_and_start_times_pairwise_sorted_,
", ", export_pair),
". Proposed mapping: ",
absl::StrJoin(proposed_slice_sizes_and_start_times_pairwise_sorted,
", ", export_pair),
".");
}();
return allowed;
}
private:
int64_t original_num_slices_;
std::vector<int64_t> slice_time_to_inclusive_schedule_time_;
std::vector<std::pair<int64_t, int64_t>>
original_slice_sizes_and_start_times_pairwise_sorted_;
std::vector<int64_t> sizes_sorted_by_offset_;
};
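// Tracks which inclusive-start-time assignments have already been produced, so
// permutations that map to an identical assignment are only tried once.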
class ObservedPermutationManager {
public:
explicit ObservedPermutationManager(
absl::Span<const int64_t> inclusive_start_times) {
slice_time_to_inclusive_start_time_ = std::vector<int64_t>(
inclusive_start_times.begin(), inclusive_start_times.end());
absl::c_sort(slice_time_to_inclusive_start_time_);
}
bool Insert(absl::Span<const int64_t> permutation) {
std::vector<int64_t> permutation_inclusive_start_times;
permutation_inclusive_start_times.reserve(permutation.size());
for (int64_t slice_time : permutation) {
permutation_inclusive_start_times.push_back(
slice_time_to_inclusive_start_time_[slice_time]);
}
return observed_inclusive_start_time_permutation_
.insert(permutation_inclusive_start_times)
.second;
}
void Clear() { observed_inclusive_start_time_permutation_.clear(); }
protected:
std::vector<int64_t> slice_time_to_inclusive_start_time_;
absl::flat_hash_set<std::vector<int64_t>>
observed_inclusive_start_time_permutation_;
};
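// Iterates over every permutation of slice times, in lexicographic order.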
class SliceTimeAllPermutationIterator : public SliceTimePermutationIterator {
public:
explicit SliceTimeAllPermutationIterator(int64_t num_slices)
: num_slices_(num_slices), permutation_(num_slices, 0) {}
~SliceTimeAllPermutationIterator() override = default;
void Begin() override {
done_ = (num_slices_ <= 0);
for (int64_t i = 0; i < num_slices_; ++i) {
permutation_[i] = i;
}
}
bool Done() const override { return done_; }
void Next() override {
if (Done()) {
return;
}
done_ = !absl::c_next_permutation(permutation_);
}
absl::Span<const int64_t> Get() const override { return permutation_; }
private:
SliceTimeAllPermutationIterator() = default;
int64_t num_slices_;
bool done_ = true;
std::vector<int64_t> permutation_;
};
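// Iterates over a small set of heuristically preferred permutations instead of
// all of them: smaller offsets get smaller slice times, smaller offsets get
// larger slice times, and small slice times distributed around the middle
// offset. Slices whose size differs from the first slice keep their original
// slice time (their permutation value is fixed).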
class SliceTimePreferredPermutationIterator
: public SliceTimePermutationIterator {
public:
SliceTimePreferredPermutationIterator(
int64_t num_slices,
const SlicedAllocationData* original_sliced_allocation)
: num_slices_(num_slices),
fixed_permutation_values_(num_slices, false),
permutation_(num_slices, 0) {
if (!original_sliced_allocation) {
slice_times_available_for_permutation_.reserve(num_slices_);
for (int64_t slice_time = 0; slice_time < num_slices_; ++slice_time) {
slice_times_available_for_permutation_.push_back(slice_time);
}
return;
}
absl::flat_hash_map<const AllocatedSlice*, int64_t>
slice_to_slice_time_map =
BuildSliceToSliceTimeMap(original_sliced_allocation);
const AllocatedSlice* first_slice = nullptr;
if (!original_sliced_allocation->slices_sorted_by_offset.empty()) {
first_slice =
&original_sliced_allocation->slices_sorted_by_offset.front();
}
for (int offset_index = 0; offset_index < num_slices_; ++offset_index) {
CHECK(first_slice);
const AllocatedSlice& slice =
original_sliced_allocation->slices_sorted_by_offset[offset_index];
if (slice.size != first_slice->size) {
fixed_permutation_values_[offset_index] = true;
permutation_[offset_index] = slice_to_slice_time_map[&slice];
continue;
}
slice_times_available_for_permutation_.push_back(
slice_to_slice_time_map[&slice]);
}
absl::c_sort(slice_times_available_for_permutation_);
}
~SliceTimePreferredPermutationIterator() override = default;
void Begin() override {
permutation_type_ = NextPermutationType(PermutationType::kUninitialized);
SetUpPermutationForCurrentType();
}
bool Done() const override {
return permutation_type_ == PermutationType::kDone;
}
void Next() override {
permutation_type_ = NextPermutationType(permutation_type_);
SetUpPermutationForCurrentType();
}
absl::Span<const int64_t> Get() const override { return permutation_; }
private:
enum class PermutationType {
kUninitialized,
kSmallerOffsetSmallerSliceTime,
kSmallerOffsetLargerSliceTime,
kDistributeSmallSliceTimesAroundMiddleOffset,
kDone,
};
SliceTimePreferredPermutationIterator() = default;
PermutationType NextPermutationType(PermutationType ty) {
switch (ty) {
case PermutationType::kUninitialized:
if (num_slices_ <= 0) {
return PermutationType::kDone;
}
return PermutationType::kSmallerOffsetSmallerSliceTime;
case PermutationType::kSmallerOffsetSmallerSliceTime:
if (num_slices_ <= 1) {
return PermutationType::kDone;
}
return PermutationType::kSmallerOffsetLargerSliceTime;
case PermutationType::kSmallerOffsetLargerSliceTime:
if (num_slices_ <= 2) {
return PermutationType::kDone;
}
return PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset;
case PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset:
case PermutationType::kDone:
return PermutationType::kDone;
}
}
absl::flat_hash_map<const AllocatedSlice*, int64_t> BuildSliceToSliceTimeMap(
const SlicedAllocationData* original_sliced_allocation) {
CHECK(original_sliced_allocation);
std::vector<const AllocatedSlice*> slice_time_to_slice;
slice_time_to_slice.reserve(num_slices_);
for (const AllocatedSlice& slice :
original_sliced_allocation->slices_sorted_by_offset) {
slice_time_to_slice.push_back(&slice);
}
absl::c_sort(slice_time_to_slice, [](const AllocatedSlice* lhs,
const AllocatedSlice* rhs) {
return std::make_tuple(lhs->inclusive_start_time, lhs->offset) <
std::make_tuple(rhs->inclusive_start_time, rhs->offset);
});
absl::flat_hash_map<const AllocatedSlice*, int64_t> map;
for (int slice_time = 0; slice_time < slice_time_to_slice.size();
++slice_time) {
map[slice_time_to_slice[slice_time]] = slice_time;
}
return map;
}
void SetUpPermutationForCurrentType() {
CHECK(permutation_type_ != PermutationType::kUninitialized);
if (Done()) {
return;
}
int permutation_index = NextAvailablePermutationIndex(-1);
for (int i = slice_times_available_for_permutation_.size() - 1; i >= 0;
--i) {
if (permutation_type_ == PermutationType::kSmallerOffsetLargerSliceTime ||
(permutation_type_ ==
PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset &&
IsOdd(i))) {
CHECK_LT(permutation_index, permutation_.size());
permutation_[permutation_index] =
slice_times_available_for_permutation_[i];
permutation_index = NextAvailablePermutationIndex(permutation_index);
}
}
for (int i = 0; i < slice_times_available_for_permutation_.size(); ++i) {
if (permutation_type_ ==
PermutationType::kSmallerOffsetSmallerSliceTime ||
(permutation_type_ ==
PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset &&
IsEven(i))) {
CHECK_LT(permutation_index, permutation_.size());
permutation_[permutation_index] =
slice_times_available_for_permutation_[i];
permutation_index = NextAvailablePermutationIndex(permutation_index);
}
}
CHECK_EQ(permutation_index, permutation_.size());
}
int NextAvailablePermutationIndex(int permutation_index) {
do {
++permutation_index;
} while (permutation_index < permutation_.size() &&
fixed_permutation_values_[permutation_index]);
return permutation_index;
}
int64_t num_slices_;
std::vector<bool> fixed_permutation_values_;
std::vector<int64_t> slice_times_available_for_permutation_;
PermutationType permutation_type_ = PermutationType::kUninitialized;
std::vector<int64_t> permutation_;
};
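// Wraps a base permutation iterator, skipping permutations that the validator
// rejects or that the observed-permutation manager has already produced.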
class ComposedSliceTimePermutationIterator
: public SliceTimePermutationIterator {
public:
ComposedSliceTimePermutationIterator(
SliceTimePermutationValidator validator,
ObservedPermutationManager seen_manager,
std::unique_ptr<SliceTimePermutationIterator> base_iterator)
: validator_(std::move(validator)),
seen_(std::move(seen_manager)),
base_iterator_(std::move(base_iterator)) {}
~ComposedSliceTimePermutationIterator() override = default;
void Begin() override { NextImpl(true); }
bool Done() const override { return base_iterator_->Done(); }
void Next() override { NextImpl(false); }
absl::Span<const int64_t> Get() const override {
return base_iterator_->Get();
}
private:
void NextImpl(bool initialize) {
if (initialize) {
seen_.Clear();
base_iterator_->Begin();
}
if (Done()) {
return;
}
if (!initialize) {
base_iterator_->Next();
}
while (!Done() && (!validator_.IsValid(Get()) || !seen_.Insert(Get()))) {
base_iterator_->Next();
}
}
SliceTimePermutationValidator validator_;
ObservedPermutationManager seen_;
std::unique_ptr<SliceTimePermutationIterator> base_iterator_;
};
}
std::unique_ptr<SliceTimePermutationIterator>
SliceTimePermutationIterator::CreateForNewAllocation(
Ty ty, absl::Span<const int64_t> inclusive_slice_start_times) {
switch (ty) {
case Ty::kAll:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(nullptr),
ObservedPermutationManager(inclusive_slice_start_times),
std::make_unique<SliceTimeAllPermutationIterator>(
inclusive_slice_start_times.size()));
case Ty::kPreferred:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(nullptr),
ObservedPermutationManager(inclusive_slice_start_times),
std::make_unique<SliceTimePreferredPermutationIterator>(
inclusive_slice_start_times.size(),
nullptr));
}
}
std::unique_ptr<SliceTimePermutationIterator>
SliceTimePermutationIterator::CreateForRepack(
Ty ty, const SlicedAllocationData* original_sliced_allocation) {
int64_t num_slices = 1;
if (original_sliced_allocation) {
num_slices = original_sliced_allocation->num_slices();
}
std::vector<int64_t> inclusive_start_times;
if (original_sliced_allocation) {
inclusive_start_times =
original_sliced_allocation->SortedInclusiveStartTimes();
} else {
inclusive_start_times.push_back(0);
}
switch (ty) {
case Ty::kAll:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(original_sliced_allocation),
ObservedPermutationManager(inclusive_start_times),
std::make_unique<SliceTimeAllPermutationIterator>(num_slices));
case Ty::kPreferred:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(original_sliced_allocation),
ObservedPermutationManager(inclusive_start_times),
std::make_unique<SliceTimePreferredPermutationIterator>(
num_slices, original_sliced_allocation));
}
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunkPiece::ToString() const {
return absl::StrCat("{ dimensions: ", dimensions.ToString(), ", free at: t",
earliest_free_slice_time, " }");
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunkRoot::ToString() const {
return absl::StrCat(
"{ chunk: ", chunk.ToString(), ", pieces: { ",
absl::StrJoin(
pieces.rbegin(), pieces.rend(), ", ",
[](std::string* out, const auto& offset_sliced_free_chunk_pair) {
absl::StrAppend(out,
offset_sliced_free_chunk_pair.second.ToString());
}),
" } }");
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
FreeChunkRoot::FreeChunkRoot(const Chunk& free_chunk,
int64_t free_chunk_slice_time)
: chunk(free_chunk),
pieces({{free_chunk.offset, {free_chunk_slice_time, free_chunk}}}) {}
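// Folds the free space known at an earlier slice time into this root: pieces
// that overlap free_chunk and become free exactly one slice time later are
// split at free_chunk's boundaries, and the overlapping portion is relabeled
// with the earlier earliest_free_slice_time.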
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
FreeChunkRoot::Update(const Chunk& free_chunk,
int64_t free_chunk_slice_time) {
VLOG(4) << "Updating root " << chunk.ToString() << " with "
<< free_chunk.ToString() << ", free at t" << free_chunk_slice_time;
std::vector<FreeChunkPiece> new_pieces;
for (auto it = pieces.lower_bound(free_chunk.chunk_end() - 1);
it != pieces.end() &&
it->second.dimensions.chunk_end() >= free_chunk.offset;) {
const FreeChunkPiece& piece = it->second;
if (!free_chunk.OverlapsWith(piece.dimensions) ||
free_chunk_slice_time != piece.earliest_free_slice_time - 1) {
++it;
continue;
}
if (free_chunk.offset > piece.dimensions.offset) {
FreeChunkPiece new_piece0(
{piece.earliest_free_slice_time,
Chunk::FromOffsetEnd(
piece.dimensions.offset,
std::min(free_chunk.offset, piece.dimensions.chunk_end()))});
new_pieces.push_back(new_piece0);
}
FreeChunkPiece new_piece1(
{free_chunk_slice_time,
Chunk::FromOffsetEnd(
std::max(free_chunk.offset, piece.dimensions.offset),
std::min(free_chunk.chunk_end(), piece.dimensions.chunk_end()))});
new_pieces.push_back(new_piece1);
if (free_chunk.chunk_end() < piece.dimensions.chunk_end()) {
FreeChunkPiece new_piece2(
{piece.earliest_free_slice_time,
Chunk::FromOffsetEnd(free_chunk.chunk_end(),
piece.dimensions.chunk_end())});
new_pieces.push_back(new_piece2);
}
it = pieces.erase(it);
}
for (auto it = new_pieces.begin(); it != new_pieces.end(); ++it) {
pieces.insert({it->dimensions.offset, *it});
}
VLOG(4) << "Root after update: " << ToString();
}
namespace {
constexpr int64_t kMaxRenderOffset = 200;
constexpr int64_t kMaxRenderSliceTime = 9;
std::string RenderTimeByFreeChunks(
const std::vector<std::vector<Chunk>>& time_by_chunks) {
if (time_by_chunks.size() - 1 > kMaxRenderSliceTime) {
return "too many time slices to render";
}
std::vector<std::string> time_by_memory_units;
for (int i = 0; i < time_by_chunks.size(); ++i) {
time_by_memory_units.push_back(std::string(kMaxRenderOffset + 1, 'X'));
for (const Chunk& chunk : time_by_chunks[i]) {
if (chunk.chunk_end() > kMaxRenderOffset) {
return "largest offset is too large to render";
}
for (int j = chunk.offset; j < chunk.chunk_end(); ++j) {
time_by_memory_units[i][j] = ' ';
}
}
}
std::vector<std::string> lines;
lines.push_back(" ^");
for (int i = time_by_memory_units.size() - 1; i >= 0; --i) {
lines.push_back(absl::StrCat("t", i, " |", time_by_memory_units[i]));
}
std::string yaxis = " +";
for (int i = 0; i < kMaxRenderOffset + 1; ++i) {
if (i % 10 == 0) {
yaxis += "!";
continue;
}
if (i % 5 == 0) {
yaxis += "|";
continue;
}
yaxis += "-";
}
lines.push_back(absl::StrCat(yaxis, ">"));
lines.push_back(" space");
return absl::StrJoin(lines, "\n");
}
}
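// Builds the finder's free-space map: one FreeChunkRoot per free chunk at the
// latest slice time, then each earlier slice time's free chunks are folded in
// (from latest to earliest) so every piece records the earliest slice time at
// which it becomes free.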
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
SlicedAllocationFinder(
absl::Span<const FreeChunks> free_chunks_per_slice_time,
std::vector<int64_t> sorted_slice_sizes, int64_t max_colocation_size,
int64_t preferred_offset, int64_t alignment,
std::unique_ptr<SliceTimePermutationIterator>
slice_time_permutation_iterator,
absl::AnyInvocable<bool(int64_t) const> is_offset_allowed)
: sorted_slice_sizes_(std::move(sorted_slice_sizes)),
slice_size_sum_(std::accumulate(sorted_slice_sizes_.begin(),
sorted_slice_sizes_.end(),
static_cast<int64_t>(0))),
max_colocation_size_(max_colocation_size),
preferred_offset_(preferred_offset),
alignment_(alignment),
slice_time_permutation_iterator_(
std::move(slice_time_permutation_iterator)),
is_offset_allowed_(std::move(is_offset_allowed)) {
CHECK_EQ(sorted_slice_sizes_.size(), free_chunks_per_slice_time.size())
<< "We expect a data structure explaining the free chunks at each slice "
"time.";
CHECK(!free_chunks_per_slice_time.empty())
<< "Even an unsliced allocation is expected to have a list of free "
"chunks at slice time t0.";
if (VLOG_IS_ON(1)) {
std::vector<std::vector<Chunk>> time_by_chunks;
for (int64_t i = 0; i < free_chunks_per_slice_time.size(); ++i) {
std::vector<Chunk> chunks;
for (const auto& free_chunk : free_chunks_per_slice_time[i]) {
chunks.push_back(
Chunk::FromOffsetEnd(free_chunk.first, free_chunk.second));
}
time_by_chunks.push_back(chunks);
}
LOG(INFO) << "Initial free space:\n"
<< RenderTimeByFreeChunks(time_by_chunks);
}
if (max_colocation_size_ < slice_size_sum_) {
max_colocation_size_ = slice_size_sum_;
}
for (const std::pair<const int64_t, int64_t>& free_chunk_pair :
free_chunks_per_slice_time.back()) {
Chunk free_chunk =
Chunk::FromOffsetEnd(free_chunk_pair.first, free_chunk_pair.second);
if (free_chunk.size == 0) {
continue;
}
CHECK_GT(free_chunk.size, 0);
free_chunks_.insert(
{free_chunk_pair.first, FreeChunkRoot(free_chunk, LatestSliceTime())});
}
for (int64_t free_chunk_slice_time = LatestSliceTime() - 1;
free_chunk_slice_time >= EarliestSliceTime(); --free_chunk_slice_time) {
auto it = free_chunks_.begin();
for (const std::pair<const int64_t, int64_t>& free_chunk_pair :
free_chunks_per_slice_time[free_chunk_slice_time]) {
Chunk free_chunk =
Chunk::FromOffsetEnd(free_chunk_pair.first, free_chunk_pair.second);
if (free_chunk.size == 0) {
continue;
}
CHECK_GT(free_chunk.size, 0);
for (; it != free_chunks_.end() &&
free_chunk.chunk_end() - 1 < it->second.chunk.offset;
++it) {
}
if (it == free_chunks_.end()) {
break;
}
auto previous_it = it;
for (; it != free_chunks_.end() &&
it->second.chunk.OverlapsWith(free_chunk);
previous_it = it, ++it) {
FreeChunkRoot& root = it->second;
root.Update(free_chunk, free_chunk_slice_time);
}
it = previous_it;
}
}
VLOG(2) << "Initial candidates:\n" << FreeChunksToAsciiArt();
VLOG(2) << "SlicedAllocationFinder:\n" << ToString();
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunksToAsciiArt() const {
auto it = free_chunks_.begin();
if (it == free_chunks_.end()) {
return "no candidate data";
}
int64_t final_offset = it->second.chunk.chunk_end();
if (LatestSliceTime() > kMaxRenderSliceTime ||
final_offset > kMaxRenderOffset) {
return "candidates too large to render";
}
std::vector<std::vector<Chunk>> time_by_chunks;
for (int64_t i = EarliestSliceTime(); i <= LatestSliceTime(); ++i) {
time_by_chunks.push_back({});
}
for (const std::pair<const int64_t, FreeChunkRoot>& offset_root_pair :
free_chunks_) {
for (const std::pair<const int64_t, FreeChunkPiece>& offset_piece_pair :
offset_root_pair.second.pieces) {
for (int64_t slice_time =
offset_piece_pair.second.earliest_free_slice_time;
slice_time <= LatestSliceTime(); ++slice_time) {
time_by_chunks[slice_time].push_back(
offset_piece_pair.second.dimensions);
}
}
}
return RenderTimeByFreeChunks(time_by_chunks);
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ToString() const {
std::vector<std::string> lines;
lines.push_back(absl::StrCat("slices: { ",
absl::StrJoin(sorted_slice_sizes_, ", "), " }"));
lines.push_back(absl::StrCat("max_colocation_size: ", max_colocation_size_));
lines.push_back(absl::StrCat("preferred_offset: ", preferred_offset_));
lines.push_back("free chunks:");
int i = 0;
for (auto it = free_chunks_.rbegin(); it != free_chunks_.rend(); ++it) {
lines.push_back(absl::StrCat(" chunk ", i, ": ", it->second.ToString()));
++i;
}
return absl::StrJoin(lines, "\n");
}
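// Searches for a placement for the sliced allocation. The preferred offset, if
// any, is tried first; otherwise roots are visited from smallest to largest
// (best fit, ties broken by smaller offset), and the first root that admits
// some offset and slice-time permutation wins.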
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::Find()
const {
if (preferred_offset_ >= 0) {
ChunksSortedBySliceTime chunks = FindForOffset(preferred_offset_);
if (!chunks.empty()) {
VLOG(1) << "SlicedAllocationFinder found chunks: " << "{ "
<< absl::StrJoin(chunks, ", ", absl::StreamFormatter()) << " }";
return chunks;
}
}
std::vector<const FreeChunkRoot*> root_heap;
for (auto it = free_chunks_.rbegin(); it != free_chunks_.rend(); ++it) {
root_heap.push_back(&it->second);
}
auto heap_cmp = [](const FreeChunkRoot* lhs, const FreeChunkRoot* rhs) {
if (lhs->chunk.size != rhs->chunk.size) {
return lhs->chunk.size > rhs->chunk.size;
}
return lhs->chunk.offset > rhs->chunk.offset;
};
auto heap_next = [&]() -> const FreeChunkRoot* {
if (root_heap.empty()) {
return nullptr;
}
absl::c_pop_heap(root_heap, heap_cmp);
const FreeChunkRoot* root = root_heap.back();
root_heap.pop_back();
return root;
};
absl::c_make_heap(root_heap, heap_cmp);
for (const FreeChunkRoot* root = heap_next(); root != nullptr;
root = heap_next()) {
VLOG(3) << "SlicedAllocationFinder::Find() searching " << root->ToString();
ChunksSortedBySliceTime chunks = FindInRoot(*root);
if (!chunks.empty()) {
VLOG(1) << "SlicedAllocationFinder found chunks: " << "{ "
<< absl::StrJoin(chunks, ", ", absl::StreamFormatter()) << " }";
return chunks;
}
}
LOG(ERROR) << "We did not find a place for our sliced allocation. This "
"should not happen because MSA operates on an infinitely "
"sized heap.";
return {};
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FindForOffset(int64_t offset) const {
VLOG(3) << "SlicedAllocationFinder::FindForOffset() searching offset "
<< offset;
auto it = free_chunks_.lower_bound(offset);
if (it != free_chunks_.end()) {
const FreeChunkRoot* root = &it->second;
ChunksSortedBySliceTime chunks = FindInRoot(*root, offset);
if (!chunks.empty()) {
VLOG(3) << "SlicedAllocationFinder found chunks at " << offset << ": "
<< "{ " << absl::StrJoin(chunks, ", ", absl::StreamFormatter())
<< " }";
return chunks;
}
}
return {};
}
template <typename BufferType>
absl::Status GlobalDecreasingSizeBestFitHeap<BufferType>::
SlicedAllocationFinder::DoesPermutationFit(
absl::Span<const int64_t> permutation_of_slice_times,
const FreeChunkRoot& root, int64_t offset) const {
absl::Status result =
DoesPermutationFitImpl(permutation_of_slice_times, root, offset);
VLOG(3) << "SlicedAllocationFinder::DoesPermutationFit\n"
<< " permutation of slice times: [ "
<< absl::StrJoin(permutation_of_slice_times, ",") << " ]\n"
<< " offset: " << offset << "\n"
<< " root: " << root.ToString() << "\n"
<< " -> " << result;
return result;
}
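// Checks whether the slices, assigned the given slice times, fit at `offset`
// within `root`. After validating the permutation length, the offset range,
// the colocation space, and the offset-allowed predicate, it walks the root's
// pieces upward from `offset`; every byte a slice occupies must lie in a piece
// that is already free at that slice's slice time.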
template <typename BufferType>
absl::Status GlobalDecreasingSizeBestFitHeap<BufferType>::
SlicedAllocationFinder::DoesPermutationFitImpl(
absl::Span<const int64_t> permutation_of_slice_times,
const FreeChunkRoot& root, int64_t offset) const {
if (permutation_of_slice_times.size() != sorted_slice_sizes_.size()) {
return InvalidArgumentStrCat(
sorted_slice_sizes_.size(), " slices times expected in permutation. ",
permutation_of_slice_times.size(), " specified.");
}
if (offset >= root.chunk.chunk_end()) {
return FailedPrecondition(
"%s", absl::StrCat("Free chunk root ", root.chunk.ToString(),
" does not overlap with offset ", offset, "."));
}
if (offset + max_colocation_size_ > root.chunk.chunk_end()) {
return FailedPrecondition(
"%s", absl::StrCat("Not enough space to fit enitre allocation [",
offset, ", ", offset + max_colocation_size_,
") in free chunk root ", root.chunk.ToString()));
}
if (!is_offset_allowed_(offset)) {
return FailedPrecondition(
"%s", absl::StrCat("We are not permitted to place an allocation at ",
"offset ", offset, "."));
}
auto piece_fwd_it = root.pieces.lower_bound(offset);
if (piece_fwd_it == root.pieces.end()) {
return FailedPrecondition(
"%s", absl::StrCat("Offset ", offset, " comes before free chunk root ",
root.chunk.ToString()));
}
++piece_fwd_it;
auto piece_reverse_it = std::make_reverse_iterator(piece_fwd_it);
auto at_pieces_end = [&](auto it) { return it == root.pieces.rend(); };
size_t slice_index = 0;
auto out_of_slices = [&](size_t index) { return index > LatestSliceTime(); };
int64_t amount_of_current_slice_consumed = 0;
int64_t current_offset = offset;
while (!at_pieces_end(piece_reverse_it) && !out_of_slices(slice_index)) {
int64_t current_slice_time = permutation_of_slice_times[slice_index];
int64_t current_slice_size = sorted_slice_sizes_[slice_index];
int64_t remaining_in_slice =
current_slice_size - amount_of_current_slice_consumed;
int64_t current_piece_time =
piece_reverse_it->second.earliest_free_slice_time;
int64_t remaining_in_piece =
piece_reverse_it->second.dimensions.chunk_end() - current_offset;
int64_t amount_to_consume =
std::min(remaining_in_slice, remaining_in_piece);
if (current_piece_time > current_slice_time) {
return FailedPrecondition(
"%s",
absl::StrCat("At slice time t", current_slice_time, ", slice ",
slice_index, " does not fit at offset ", current_offset,
" in root ", root.chunk.ToString()));
}
if (remaining_in_slice >= remaining_in_piece) {
++piece_reverse_it;
amount_of_current_slice_consumed += amount_to_consume;
}
if (remaining_in_slice <= remaining_in_piece) {
++slice_index;
amount_of_current_slice_consumed = 0;
}
current_offset += amount_to_consume;
}
if (!out_of_slices(slice_index)) {
return InternalStrCat("Ran out of space in root ", root.chunk.ToString(),
" to fit slice permutation; however, we should "
"have caught such a condition earlier.");
}
return absl::OkStatus();
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::FindInRoot(
const FreeChunkRoot& root,
std::optional<int64_t> only_try_this_offset) const {
int64_t first_offset = root.chunk.offset;
int64_t last_end = root.chunk.chunk_end();
if (only_try_this_offset.has_value()) {
first_offset = *only_try_this_offset;
last_end = *only_try_this_offset + max_colocation_size_;
if (*only_try_this_offset % alignment_ != 0) {
return {};
}
} else if (first_offset % alignment_ != 0) {
first_offset = first_offset + (alignment_ - (first_offset % alignment_));
}
CHECK_EQ(first_offset % alignment_, 0);
for (int64_t offset = first_offset; offset + max_colocation_size_ <= last_end;
offset += alignment_) {
for (slice_time_permutation_iterator_->Begin();
!slice_time_permutation_iterator_->Done();
slice_time_permutation_iterator_->Next()) {
if (DoesPermutationFit(slice_time_permutation_iterator_->Get(), root,
offset)
.ok()) {
return PermutationToChunks(slice_time_permutation_iterator_->Get(),
offset);
}
}
if (root.pieces.size() == 1) {
break;
}
}
return {};
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
PermutationToChunks(absl::Span<const int64_t> permutation_of_slice_times,
int64_t offset) const {
ChunksSortedBySliceTime chunks(permutation_of_slice_times.size() + 1,
Chunk::FromOffsetSize(-1, 1));
int64_t current_offset = offset;
for (int64_t slice_index = 0; slice_index <= LatestSliceTime();
++slice_index) {
int64_t size = sorted_slice_sizes_[slice_index];
chunks[permutation_of_slice_times[slice_index]] =
Chunk::FromOffsetSize(current_offset, size);
current_offset += size;
}
chunks.back() = Chunk::FromOffsetSize(
current_offset, max_colocation_size_ - (current_offset - offset));
DCHECK(std::all_of(chunks.begin(), chunks.end(), [](const Chunk& chunk) {
return chunk.offset >= 0 && chunk.size >= 0;
}));
return chunks;
}
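// Allocates every buffer interval that needs allocation, in the order given by
// buffer_interval_compare_ (typically decreasing size), committing the chunk
// candidate found for each.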
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
GlobalDecreasingSizeBestFitHeap<BufferType>::Finish() {
std::vector<BufferInterval> sorted_buffer_intervals =
GetSortedBufferIntervals();
for (auto& buffer_interval : sorted_buffer_intervals) {
if (!buffer_interval.need_allocation) {
continue;
}
CommitChunk(buffer_interval, FindChunkCandidate(buffer_interval));
}
VLOG(1) << "result heap_size: " << result_.heap_size;
Result result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(result_);
return result;
}
template <typename BufferType>
std::vector<
typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval>
GlobalDecreasingSizeBestFitHeap<BufferType>::GetSortedBufferIntervals() const {
std::vector<BufferInterval> sorted_buffer_intervals;
sorted_buffer_intervals.reserve(buffer_intervals_.size());
for (auto& entry : buffer_intervals_) {
sorted_buffer_intervals.push_back(entry.second);
}
absl::c_sort(sorted_buffer_intervals, buffer_interval_compare_);
return sorted_buffer_intervals;
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk
GlobalDecreasingSizeBestFitHeap<BufferType>::FindChunkCandidate(
const GlobalDecreasingSizeBestFitHeap::BufferInterval& buffer_interval,
int64_t preferred_offset) const {
const SlicedBufferInterval sliced_buffer_interval =
SlicedBufferInterval::CreateConstInterval(buffer_interval);
std::vector<Chunk> chunks =
FindChunkCandidates(sliced_buffer_interval, preferred_offset);
CHECK_EQ(chunks.size(), 1);
return chunks[0];
}
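// Computes the free offset ranges available to `buffer_interval`: starting
// from a single [0, INT64_MAX) chunk, subtracts every chunk already committed
// that overlaps the interval's live range (or the live range of any transitive
// colocation). The remainder below a used chunk is kept only if it can still
// hold the buffer; the aligned remainder above it is kept only if it can hold
// max_colocation_size.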
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::FreeChunks
GlobalDecreasingSizeBestFitHeap<BufferType>::MakeFreeChunks(
const BufferInterval& buffer_interval, int64_t max_colocation_size) const {
FreeChunks free_chunks{
{0, INT64_MAX}};
auto subtract_used_chunks = [&](const std::vector<Chunk>& used_chunks) {
for (const Chunk& used_chunk : used_chunks) {
auto it_end = free_chunks.lower_bound(used_chunk.chunk_end());
if (it_end == free_chunks.end()) continue;
auto it_start = free_chunks.lower_bound(used_chunk.offset);
int64_t free_chunk_end = it_end->second;
if (it_start != free_chunks.end()) {
if (used_chunk.offset - it_start->first >= buffer_interval.size) {
it_start->second = std::min(it_start->second, used_chunk.offset);
} else {
++it_start;
}
}
free_chunks.erase(it_end, it_start);
int64_t chunk_end_aligned = RoundUpTo(used_chunk.chunk_end(), alignment_);
if (free_chunk_end - chunk_end_aligned >= max_colocation_size) {
CHECK(free_chunks.insert({chunk_end_aligned, free_chunk_end}).second);
}
}
};
subtract_used_chunks(interval_tree_.ChunksOverlappingInTime(
buffer_interval.start, buffer_interval.end));
for (const BufferType* colocation :
GetTransitiveColocations(buffer_interval)) {
const BufferInterval& interval = buffer_intervals_.at(colocation);
VLOG(1) << " Alias size " << interval.size << ", start " << interval.start
<< ", end " << interval.end << " " << interval.buffer->ToString();
subtract_used_chunks(
interval_tree_.ChunksOverlappingInTime(interval.start, interval.end));
}
return free_chunks;
}
template <typename BufferType>
std::vector<typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk>
GlobalDecreasingSizeBestFitHeap<BufferType>::FindChunkCandidates(
const SlicedBufferInterval& sliced_buffer_interval,
int64_t preferred_offset) const {
VLOG(1) << "Finding chunks for sliced buffer interval: "
<< sliced_buffer_interval.ToString();
int64_t max_colocation_size =
GetMaxColocationSize(sliced_buffer_interval.full_buffer_interval());
auto chunks =
CreateSlicedAllocationFinder(
sliced_buffer_interval, max_colocation_size, preferred_offset,
SliceTimePermutationIterator::CreateForNewAllocation(
slice_time_permutation_iteration_type_,
sliced_buffer_interval.inclusive_start_times()))
.Find();
return PostProcessFindChunkCandidatesResult(sliced_buffer_interval,
std::move(chunks));
}
template <typename BufferType>
int64_t GlobalDecreasingSizeBestFitHeap<BufferType>::GetMaxColocationSize(
const BufferInterval& buffer_interval) const {
int64_t max_colocation_size = buffer_interval.size;
for (const BufferType* colocation :
GetTransitiveColocations(buffer_interval)) {
max_colocation_size =
std::max(max_colocation_size, buffer_intervals_.at(colocation).size);
}
return max_colocation_size;
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder
GlobalDecreasingSizeBestFitHeap<BufferType>::CreateSlicedAllocationFinder(
const SlicedBufferInterval& sliced_interval, int64_t max_colocation_size,
int64_t preferred_offset,
std::unique_ptr<SliceTimePermutationIterator>
slice_time_permutation_iterator,
absl::AnyInvocable<bool(int64_t) const> is_offset_allowed) const {
std::vector<FreeChunks> free_chunks_per_slice_time;
free_chunks_per_slice_time.reserve(sliced_interval.num_slices());
for (int slice_time = 0; slice_time < sliced_interval.num_slices() - 1;
++slice_time) {
free_chunks_per_slice_time.push_back(
MakeFreeChunks(sliced_interval.IntervalForMakeFreeChunks(slice_time),
-1));
}
free_chunks_per_slice_time.push_back(MakeFreeChunks(
sliced_interval.IntervalForMakeFreeChunks(sliced_interval.num_slices() -
1),
max_colocation_size));
return SlicedAllocationFinder(
free_chunks_per_slice_time, sliced_interval.SliceSizesSortedByOffset(),
max_colocation_size, preferred_offset, alignment_,
std::move(slice_time_permutation_iterator), std::move(is_offset_allowed));
}
template <typename BufferType>
std::vector<typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk>
GlobalDecreasingSizeBestFitHeap<BufferType>::
PostProcessFindChunkCandidatesResult(
const SlicedBufferInterval& sliced_interval,
std::vector<Chunk> chunks) const {
if (chunks.empty()) {
return {};
}
CHECK_EQ(chunks.size(), sliced_interval.num_slices() + 1);
chunks.pop_back();
return chunks;
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::CommitChunk(
const GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
buffer_interval,
GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk chunk) {
CHECK_EQ(chunk.size, buffer_interval.size);
result_.heap_size = result_.UpdatedHeapSize(chunk);
interval_tree_.Add(buffer_interval.start, buffer_interval.end, chunk);
for (auto colocation : GetTransitiveColocations(buffer_interval)) {
auto colocation_interval = buffer_intervals_[colocation];
Chunk colocation_chunk =
Chunk::FromOffsetSize(chunk.offset, colocation_interval.size);
result_.heap_size = result_.UpdatedHeapSize(colocation_chunk);
interval_tree_.Add(colocation_interval.start, colocation_interval.end,
colocation_chunk);
AddToChunkMap(colocation, colocation_chunk);
}
AddToChunkMap(buffer_interval.buffer, chunk);
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::AddToChunkMap(
const BufferType* buffer, Chunk chunk) {
const auto emplace_result = result_.chunk_map.emplace(buffer, chunk);
DCHECK(emplace_result.second);
}
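// Packs buffers into multiple heaps, each bounded by size_limit_per_heap_.
// A buffer is committed to the current heap if its chunk fits under the limit
// (or the heap is still empty, so oversized buffers are placed with a
// warning); the remaining buffers are deferred to later rounds until all are
// placed.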
absl::StatusOr<HeapSimulator::Result<HloValue>>
ConstrainedGlobalDecreasingSizeBestFitHeap::Finish() {
std::vector<BufferInterval> sorted_buffer_vec = GetSortedBufferIntervals();
std::list<BufferInterval> sorted_buffer_intervals(sorted_buffer_vec.begin(),
sorted_buffer_vec.end());
Result multi_heap_result;
do {
for (auto it = sorted_buffer_intervals.begin();
it != sorted_buffer_intervals.end();) {
BufferInterval buffer_interval = *it;
if (!buffer_interval.need_allocation) {
it = sorted_buffer_intervals.erase(it);
continue;
}
if (buffer_interval.size > size_limit_per_heap_) {
LOG(WARNING) << "Alloc buffer size " << buffer_interval.size
<< " larger than the per-heap size limit "
<< size_limit_per_heap_;
}
Chunk chunk_candidate = FindChunkCandidate(buffer_interval);
if (chunk_candidate.chunk_end() <= size_limit_per_heap_ ||
result_.heap_size == 0) {
CommitChunk(buffer_interval, chunk_candidate);
it = sorted_buffer_intervals.erase(it);
continue;
}
++it;
}
multi_heap_result.heap_size += result_.heap_size;
multi_heap_result.heap_results.push_back(std::move(result_));
result_ = {};
interval_tree_ = {};
} while (!sorted_buffer_intervals.empty());
VLOG(1) << "Number of heaps produced = "
<< multi_heap_result.heap_results.size();
return multi_heap_result;
}
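// Runs every candidate algorithm and returns the result with the smallest
// heap size.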
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
ChooseBestHeapAlgorithm<BufferType>::Finish() {
DCHECK(!algorithms_.empty());
std::vector<Result> results(algorithms_.size());
int64_t min_size = INT64_MAX;
int min_size_index = -1;
for (int i = 0; i < algorithms_.size(); ++i) {
TF_ASSIGN_OR_RETURN(results[i], algorithms_[i]->Finish());
if (results[i].heap_size < min_size) {
min_size = results[i].heap_size;
min_size_index = i;
}
}
DCHECK_GE(min_size_index, 0);
return results[min_size_index];
}
template class GlobalDecreasingSizeBestFitHeap<HloValue>;
template class GlobalDecreasingSizeBestFitHeap<AllocationBlock>;
template class ChooseBestHeapAlgorithm<HloValue>;
}
| #include "xla/service/heap_simulator/heap_simulator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::ContainerEq;
using ::testing::HasSubstr;
using ::testing::StrEq;
class MinimumMemoryForSequenceTest : public HloTestBase {};
TEST_F(MinimumMemoryForSequenceTest, MultiComputation) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 0));
HloInstruction* cond_data = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_data, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param_iter"));
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_data"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({iter, data}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_data, cond_lt});
schedule.set_sequence(body_computation, {body_param});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(25,
HeapSimulator::MinimumMemoryForModule(schedule, size_fn).value());
}
TEST_F(MinimumMemoryForSequenceTest, SubcomputationAccounting) {
auto module = CreateNewVerifiedModule();
const Shape r0f32 = ShapeUtil::MakeShape(F32, {});
const Shape r1f32 = ShapeUtil::MakeShape(F32, {4});
const Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 4});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "cond_param"));
HloInstruction* slice =
cond_builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1}), cond_param, {0}, {1}, {1}));
HloInstruction* reshape =
cond_builder.AddInstruction(HloInstruction::CreateReshape(r0f32, slice));
HloInstruction* zero = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
HloInstruction* cond_comparison = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), reshape,
zero, ComparisonDirection::kNe));
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "body_param"));
HloInstruction* one_vector =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
HloInstruction* subtract =
body_builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, body_param, one_vector));
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* while_init =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
HloInstruction* while_loop =
builder.AddInstruction(HloInstruction::CreateWhile(
r1f32, cond_computation, body_computation, while_init));
HloInstruction* bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(r2f32, while_loop, {1}));
HloInstruction* matrix = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>(
{{1.0, 2.0, 3.0, 4.0}, {1.0, 2.0, 3.0, 4.0}})));
HloInstruction* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(r2f32, matrix, {0, 1}));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, transpose, bcast));
auto entry_computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
std::vector<HloInstruction*> cond_vec = {cond_param, slice, reshape, zero,
cond_comparison};
std::vector<HloInstruction*> while_body_vec = {body_param, one_vector,
subtract};
std::vector<HloInstruction*> entry_comp_vec = {while_init, while_loop, bcast,
matrix, transpose, add};
schedule.set_sequence(cond_computation, cond_vec);
schedule.set_sequence(body_computation, while_body_vec);
schedule.set_sequence(entry_computation, entry_comp_vec);
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module.get()).value();
EXPECT_EQ(64, HeapSimulator::MinimumMemoryForComputation(
*entry_computation, schedule.sequence(entry_computation),
*alias_analysis, size_fn)
.value());
}
const char kAlloc[] = "Alloc";
const char kFree[] = "Free";
const char kShare[] = "Share";
const char kFinish[] = "Finish";
using CallSequence = std::vector<std::pair<std::string, const HloValue*>>;
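// A fake heap algorithm that records the sequence of Alloc/Free/ShareWith/
// Finish calls it receives, assigning each new buffer a simple increasing
// offset, so tests can assert on the exact call order.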
class HeapCallRecorder : public HeapAlgorithm<HloValue> {
public:
explicit HeapCallRecorder(CallSequence* calls) : calls_(calls) {}
~HeapCallRecorder() override {}
void Alloc(const HloValue* buffer, int64_t size) override {
calls_->emplace_back(kAlloc, buffer);
const int64_t offset = result_.chunk_map.size();
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
}
void ShareWith(const HloValue* buffer, const HloValue* shared,
int64_t size) override {
calls_->emplace_back(kShare, buffer);
const int64_t offset = result_.chunk_map[shared].offset;
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
}
void Free(const HloValue* buffer, int64_t size) override {
calls_->emplace_back(kFree, buffer);
}
absl::StatusOr<Result> Finish() override {
calls_->emplace_back(kFinish, nullptr);
HeapSimulator::Result<HloValue> result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(std::move(result_));
return result;
}
private:
CallSequence* calls_;
HeapSimulator::HeapResult<HloValue> result_;
};
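// Test helper that runs HeapSimulator over a computation (or a whole module)
// with a HeapCallRecorder and provides assertions on the observed call
// sequence and on buffers sharing the same offset.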
class HeapSimulatorTracker {
public:
explicit HeapSimulatorTracker(
std::unique_ptr<HloModule> module,
const std::vector<HloInstruction*>& instruction_sequence,
const std::vector<HloInstruction*>& must_alias_set = {},
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
module_ = std::move(module);
Init(instruction_sequence, can_share_buffer);
}
explicit HeapSimulatorTracker(
const std::string& name,
std::unique_ptr<HloComputation> entry_computation,
const std::vector<HloInstruction*>& instruction_sequence,
const std::vector<HloInstruction*>& must_alias_set = {},
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
HloModuleConfig config;
module_ = std::make_unique<HloModule>(name, config);
module_->AddEntryComputation(std::move(entry_computation));
Init(instruction_sequence, can_share_buffer);
}
explicit HeapSimulatorTracker(const std::string& name) {
HloModuleConfig config;
module_ = std::make_unique<HloModule>(name, config);
}
void RunWholeModule(
const std::vector<HloInstruction*>& full_module_sequence) {
alias_analysis_ = HloAliasAnalysis::Run(module_.get()).value();
HloSchedule schedule(module_.get());
absl::flat_hash_map<const HloInstruction*, int> reverse_position;
for (int i = 0; i < full_module_sequence.size(); ++i) {
HloInstruction* instruction = full_module_sequence[i];
schedule.GetOrCreateSequence(instruction->parent())
.push_back(instruction);
reverse_position[instruction] = full_module_sequence.size() - i;
}
auto size_fn = [&reverse_position](const BufferValue& buffer) {
return reverse_position[buffer.instruction()];
};
auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
result_ = HeapSimulator::Run(std::move(algorithm), *module_, schedule,
*alias_analysis_, size_fn)
.value();
}
HloModule* module() { return module_.get(); }
const HloValue* BufferAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return &alias_analysis_->dataflow_analysis().GetUniqueValueAt(instruction,
index);
}
int64_t OffsetAt(const HloInstruction* instruction, const ShapeIndex& index) {
const HloValue* buffer = BufferAt(instruction, index);
CHECK_EQ(1, result_.heap_results.size());
return result_.heap_results.at(0).chunk_map.at(buffer).offset;
}
void ExpectCallSequence(const CallSequence& expected) const {
auto to_string = [](const CallSequence& sequence) {
std::string output;
for (int64_t i = 0; i < sequence.size(); ++i) {
auto pair = sequence.at(i);
absl::StrAppendFormat(&output, "%d", i);
absl::StrAppendFormat(&output, " :%s", pair.first);
if (pair.second != nullptr) {
absl::StrAppendFormat(&output, " - %s{%s}\n",
pair.second->instruction()->name(),
pair.second->index().ToString());
}
}
return output;
};
EXPECT_EQ(expected, actual_calls_) << "Expected:\n"
<< to_string(expected) << " \nActual:\n"
<< to_string(actual_calls_) << "\n";
}
void ExpectSharedBuffers(const HloInstruction* instruction_a,
const ShapeIndex& index_a,
const HloInstruction* instruction_b,
const ShapeIndex& index_b) {
int64_t offset_a = OffsetAt(instruction_a, index_a);
int64_t offset_b = OffsetAt(instruction_b, index_b);
EXPECT_EQ(offset_a, offset_b);
}
private:
void Init(const std::vector<HloInstruction*>& instruction_sequence,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
auto zero_size = [](const BufferValue& buffer) { return 0; };
auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
alias_analysis_ =
HloAliasAnalysis::Run(module_.get(), can_share_buffer).value();
HeapSimulator::Options options;
result_ =
HeapSimulator::Run(std::move(algorithm), *module_->entry_computation(),
HloInstructionSequence(instruction_sequence),
*alias_analysis_, zero_size, options)
.value();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
CallSequence actual_calls_;
HeapSimulator::Result<HloValue> result_;
};
class HeapSimulatorTest : public HloTestBase {
protected:
HeapSimulatorTest() {}
~HeapSimulatorTest() override {}
Shape f32scalar_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
};
TEST_F(HeapSimulatorTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HeapSimulatorTracker tracker(TestName(), builder.Build(), {const0});
tracker.ExpectCallSequence({{kFinish, nullptr}});
}
TEST_F(HeapSimulatorTest, OneParam) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "param0"));
HeapSimulatorTracker tracker(TestName(), builder.Build(), {param0});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(param0, {})},
{kFree, tracker.BufferAt(param0, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, Multiply) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, MultiplyAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, add});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(mul, {})},
{kShare, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(add, {})},
{kFinish, nullptr},
});
tracker.ExpectSharedBuffers(add, {}, mul, {});
}
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnce) {
auto can_share_buffer =
[](const HloInstruction* instr, const HloInstruction* operand,
const ShapeIndex& user_index) -> std::optional<bool> {
return instr->opcode() == HloOpcode::kFusion &&
operand->shape().IsArray() &&
ShapeUtil::Equal(operand->shape(),
ShapeUtil::GetSubshape(instr->shape(), user_index));
};
HloModuleConfig config;
auto module = std::make_unique<HloModule>(TestName(), config);
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
{
auto param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "x"));
fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
}
auto fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
auto element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 0));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
builder.AddInstruction(HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd,
negate0, negate1));
module->AddEntryComputation(builder.Build());
HeapSimulatorTracker tracker(
std::move(module),
{paramA, negate, fusion, element0, element1, negate0, negate1}, {},
can_share_buffer);
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(negate, {})},
{kAlloc, tracker.BufferAt(fusion, {})},
{kFree, tracker.BufferAt(negate, {})},
{kShare, tracker.BufferAt(fusion, {0})},
{kAlloc, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(negate0, {})},
{kFree, tracker.BufferAt(fusion, {0})},
{kFree, tracker.BufferAt(negate0, {})},
{kAlloc, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnceOutputShortLived) {
auto can_share_buffer =
[](const HloInstruction* instr, const HloInstruction* operand,
const ShapeIndex& user_index) -> std::optional<bool> {
if (instr->opcode() == HloOpcode::kFusion) {
return true;
}
return false;
};
HloModuleConfig config;
auto module = std::make_unique<HloModule>(TestName(), config);
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
{
auto param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "x"));
fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
}
auto fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 1));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
module->AddEntryComputation(builder.Build());
HeapSimulatorTracker tracker(std::move(module),
{paramA, negate, fusion, element1, negate1}, {},
can_share_buffer);
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(negate, {})},
{kFree, tracker.BufferAt(negate, {})},
{kShare, tracker.BufferAt(fusion, {0})},
{kAlloc, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(fusion, {0})},
{kFree, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(negate1, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, BufferReusedOnce) {
HeapSimulatorTracker tracker(TestName());
auto builder = HloComputation::Builder(TestName());
HloComputation::Builder fusion_builder("fusion");
{
HloComputation::Builder& builder = fusion_builder;
    auto* a_param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, f32vec4_, "A"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kExp, a_param));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
builder.AddInstruction(HloInstruction::CreateTuple({exp, neg}));
}
auto fusion_computation =
tracker.module()->AddEmbeddedComputation(fusion_builder.Build());
auto a_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {neg}, fusion_computation));
tracker.module()->AddEntryComputation(builder.Build());
tracker.RunWholeModule({a_param, neg, fusion});
auto neg_buffer = tracker.OffsetAt(neg, {});
int64_t output_buffer_0 = tracker.OffsetAt(fusion, {0});
int64_t output_buffer_1 = tracker.OffsetAt(fusion, {1});
EXPECT_TRUE((neg_buffer == output_buffer_0) ^
(neg_buffer == output_buffer_1));
}
TEST_F(HeapSimulatorTest, MultiplyDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, MultiplyDotAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, dot, paramA));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot, add});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(dot, {})},
{kShare, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(add, {})},
{kFinish, nullptr},
});
tracker.ExpectSharedBuffers(add, {}, dot, {});
}
TEST_F(HeapSimulatorTest, MultiplyDotDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot0 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto dot1 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, dot0, paramY, dot_dnums, DefaultPrecisionConfig(2)));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot0, dot1});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot1, {})},
{kFree, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot1, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, MultiplyDotDotTuple) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot0 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto dot1 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, dot0, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({dot0, dot1}));
HeapSimulatorTracker tracker(
TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot0, dot1, tuple});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot1, {})},
{kAlloc, tracker.BufferAt(tuple, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(dot1, {})},
{kFree, tracker.BufferAt(tuple, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, IndependentTupleElements) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramB = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32scalar_, "paramB"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kMultiply, paramA, paramB));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kAdd, paramA, paramB));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
auto element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, tuple, 0));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec4_, element0, {0}));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kSubtract, paramA, paramB));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, tuple, 1));
auto output = builder.AddInstruction(
HloInstruction::CreateTuple({broadcast, sub, element1}));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramB, mul, add, tuple, element0,
broadcast, sub, element1, output});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramB, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(add, {})},
{kAlloc, tracker.BufferAt(tuple, {})},
{kAlloc, tracker.BufferAt(broadcast, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(sub, {})},
{kFree, tracker.BufferAt(tuple, {})},
{kAlloc, tracker.BufferAt(output, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramB, {})},
{kFree, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(broadcast, {})},
{kFree, tracker.BufferAt(sub, {})},
{kFree, tracker.BufferAt(output, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, WholeModule) {
HeapSimulatorTracker tracker(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 0));
HloInstruction* cond_data = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_data, ComparisonDirection::kLt));
HloComputation* cond_computation =
tracker.module()->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloComputation* body_computation =
tracker.module()->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, param));
tracker.module()->AddEntryComputation(builder.Build());
tracker.RunWholeModule(
{param, while_op, body_param, cond_param, cond_iter, cond_data, cond_lt});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(param, {})},
{kAlloc, tracker.BufferAt(param, {0})},
{kAlloc, tracker.BufferAt(param, {1})},
{kAlloc, tracker.BufferAt(cond_lt, {})},
{kFree, tracker.BufferAt(cond_lt, {})},
{kFree, tracker.BufferAt(param, {})},
{kFree, tracker.BufferAt(param, {0})},
{kFree, tracker.BufferAt(param, {1})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, AsyncCallImplicitSharding) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
called_computation {
param0 = f32[4] parameter(0)
constant = f32[1] constant(1)
dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant)
ROOT negate = f32[4] negate(dynamic-update-slice)
}
ENTRY entry {
p0 = f32[8] parameter(0)
call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread="foo", to_apply=called_computation
ROOT call-done = f32[8] call-done(call-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
auto size_fn = [](const BufferValue& buffer) -> int64_t {
const Shape& shape = buffer.shape();
if (!shape.IsArray()) {
return 0;
}
return ShapeUtil::ByteSizeOf(shape);
};
  auto algorithm = std::make_unique<GlobalDecreasingSizeBestFitHeap<HloValue>>(
      /*alignment=*/1);
HeapSimulator::Result<HloValue> result =
HeapSimulator::Run(std::move(algorithm), *module, module->schedule(),
*alias_analysis, size_fn)
.value();
for (const auto& [value, chunk] : result.heap_results[0].chunk_map) {
if (value->instruction()->name() == "dynamic-update-slice") {
EXPECT_EQ(chunk.size, 32);
}
}
}
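// Base fixture for heap algorithm tests; provides dummy HloValues
// (buffer_a_ ... buffer_i_) to use as allocation keys.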
class HeapAlgorithmTestBase : public ::testing::Test {
protected:
HeapAlgorithmTestBase() : builder_("heap_simulator_test") {
buffer_a_ = DummyBufferValue();
buffer_b_ = DummyBufferValue();
buffer_c_ = DummyBufferValue();
buffer_d_ = DummyBufferValue();
buffer_e_ = DummyBufferValue();
buffer_f_ = DummyBufferValue();
buffer_g_ = DummyBufferValue();
buffer_h_ = DummyBufferValue();
buffer_i_ = DummyBufferValue();
}
~HeapAlgorithmTestBase() override {}
const HloValue* buffer_a_;
const HloValue* buffer_b_;
const HloValue* buffer_c_;
const HloValue* buffer_d_;
const HloValue* buffer_e_;
const HloValue* buffer_f_;
const HloValue* buffer_g_;
const HloValue* buffer_h_;
const HloValue* buffer_i_;
private:
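  // Returns a dummy HloValue backed by a freshly created scalar constant; it
  // is used only as an opaque buffer key.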
const HloValue* DummyBufferValue() {
const HloValue::Id id = buffers_.size();
auto const0 = builder_.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
buffers_.emplace_back(std::make_unique<HloValue>(id, const0, ShapeIndex{}));
return buffers_.back().get();
}
HloComputation::Builder builder_;
std::vector<std::unique_ptr<HloValue>> buffers_;
};
class NoFragmentationStatsHeapTest : public HeapAlgorithmTestBase {};
TEST_F(NoFragmentationStatsHeapTest, Empty) {
NoFragmentationStatsHeap<HloValue> heap;
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(0, result.heap_size);
}
TEST_F(NoFragmentationStatsHeapTest, Simple) {
NoFragmentationStatsHeap<HloValue> heap;
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 30);
heap.Alloc(buffer_d_, 30);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 30);
heap.Free(buffer_d_, 30);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(90, result.heap_size);
}
TEST_F(NoFragmentationStatsHeapTest, Mixed) {
NoFragmentationStatsHeap<HloValue> heap;
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Free(buffer_b_, 20);
heap.Alloc(buffer_c_, 30);
heap.Free(buffer_c_, 30);
heap.Alloc(buffer_d_, 5);
heap.Free(buffer_d_, 5);
heap.Free(buffer_a_, 10);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(40, result.heap_size);
}
class GlobalDecreasingSizeBestFitHeapTest : public HeapAlgorithmTestBase {};
TEST_F(GlobalDecreasingSizeBestFitHeapTest, Empty) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(0, result.heap_size);
EXPECT_EQ(1, result.heap_results.size());
EXPECT_EQ(0, result.heap_results.at(0).chunk_map.size());
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, DecreasingSize) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.Alloc(buffer_c_, 20);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 30);
heap.Free(buffer_c_, 20);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(100, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(90, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(40, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(70, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_d_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, DecreasingSizeWithAlignment) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(20);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 50);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 50);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(120, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(60, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(100, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(60, result.chunk_map.at(buffer_d_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, BestFit) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 40);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 30);
heap.Alloc(buffer_e_, 50);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 40);
heap.Free(buffer_d_, 30);
heap.Free(buffer_e_, 50);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(140, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_e_).size);
EXPECT_EQ(90, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(120, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(90, result.chunk_map.at(buffer_d_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_e_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, Colocated) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.Free(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(40, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedII) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(60, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(40, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedIII) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.ShareWith(buffer_c_, buffer_a_, 10);
heap.Free(buffer_c_, 10);
heap.Free(buffer_b_, 30);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(40, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(10, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(30, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedDifferentSize1) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 30);
heap.Free(buffer_c_, 30);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(50, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedDifferentSize2) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 50);
heap.Free(buffer_c_, 50);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(70, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(50, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
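// Fixture whose inner heap subclass exposes the protected buffer-interval and
// chunk-candidate machinery of GlobalDecreasingSizeBestFitHeap for direct
// testing.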
class FindGlobalDecreasingSizeBestFitTest : public HeapAlgorithmTestBase {
protected:
class InheritedGlobalDecreasingSizeBestFitHeap
: public GlobalDecreasingSizeBestFitHeap<HloValue> {
public:
InheritedGlobalDecreasingSizeBestFitHeap()
: GlobalDecreasingSizeBestFitHeap(1) {}
std::pair<int64_t, int64_t> MakeFindAndCommit(
const HloValue* buffer, int64_t size, int64_t start, int64_t end,
int64_t preferred_offset = -1) {
MakeBufferInterval(buffer, size, start, end);
BufferInterval* buffer_interval = &GetBufferInterval(buffer);
Chunk chunk_candidate =
FindChunkCandidate(*buffer_interval, preferred_offset);
EXPECT_EQ(chunk_candidate.size, size);
std::pair<int64_t, int64_t> result = std::make_pair(
chunk_candidate.offset, result_.UpdatedHeapSize(chunk_candidate));
CommitChunk(*buffer_interval, chunk_candidate);
return result;
}
void MakeBufferInterval(const HloValue* buffer, int64_t size, int64_t start,
int64_t end) {
BufferInterval* buffer_interval = &buffer_intervals_[buffer];
buffer_interval->buffer = buffer;
buffer_interval->size = size;
buffer_interval->start = start;
buffer_interval->end = end;
}
void AddColocationToBuffer(const HloValue* buffer,
const HloValue* colocation) {
CHECK(buffer_intervals_.contains(buffer));
buffer_intervals_[buffer].colocations.push_back(colocation);
}
BufferInterval& GetBufferInterval(const HloValue* buffer) {
CHECK(buffer_intervals_.contains(buffer));
return buffer_intervals_[buffer];
}
std::vector<Chunk> FindChunkCandidates(
const SlicedBufferInterval& sliced_buffer_interval,
int64_t preferred_offset = -1) const {
return GlobalDecreasingSizeBestFitHeap<HloValue>::FindChunkCandidates(
sliced_buffer_interval, preferred_offset);
}
void CommitChunk(const BufferInterval& buffer_interval, Chunk chunk) {
GlobalDecreasingSizeBestFitHeap<HloValue>::CommitChunk(buffer_interval,
chunk);
}
void AddToChunkMap(const HloValue* buffer, Chunk chunk) override {
committed_[buffer].push_back(chunk);
}
const absl::flat_hash_map<const HloValue*, std::vector<Chunk>>& committed()
const {
return committed_;
}
int64_t heap_size() const { return result_.heap_size; }
private:
absl::flat_hash_map<const HloValue*, std::vector<Chunk>> committed_;
};
using BufferInterval =
InheritedGlobalDecreasingSizeBestFitHeap::BufferInterval;
using SlicedBufferInterval =
InheritedGlobalDecreasingSizeBestFitHeap::SlicedBufferInterval;
using Chunk = InheritedGlobalDecreasingSizeBestFitHeap::Chunk;
InheritedGlobalDecreasingSizeBestFitHeap heap_;
};
TEST_F(FindGlobalDecreasingSizeBestFitTest, ChunkCandidate) {
using pair = std::pair<int64_t, int64_t>;
EXPECT_EQ(pair(5, 10), heap_.MakeFindAndCommit(buffer_a_, 5, 6, 10, 5));
EXPECT_EQ(pair(0, 10), heap_.MakeFindAndCommit(buffer_b_, 10, 3, 5));
EXPECT_EQ(pair(10, 15), heap_.MakeFindAndCommit(buffer_c_, 5, 2, 8));
EXPECT_EQ(pair(0, 15), heap_.MakeFindAndCommit(buffer_d_, 5, 0, 2, 10));
EXPECT_EQ(pair(10, 20), heap_.MakeFindAndCommit(buffer_e_, 10, 11, 13, 10));
EXPECT_EQ(pair(20, 25), heap_.MakeFindAndCommit(buffer_f_, 5, 3, 5, 20));
EXPECT_EQ(pair(25, 35), heap_.MakeFindAndCommit(buffer_g_, 10, 4, 8, 15));
}
TEST_F(FindGlobalDecreasingSizeBestFitTest, FindChunkCandidates) {
{
heap_.MakeBufferInterval(buffer_a_, 10, 5, 15);
auto sliced_buffer_a = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_a_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_a);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 10)));
heap_.CommitChunk(sliced_buffer_a.full_buffer_interval(),
Chunk::FromOffsetSize(0, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(::testing::Pair(
buffer_a_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 10)))));
EXPECT_EQ(heap_.heap_size(), 10);
}
{
heap_.MakeBufferInterval(buffer_b_, 10, 25, 35);
heap_.MakeBufferInterval(buffer_c_, 15, 10, 20);
heap_.AddColocationToBuffer(buffer_b_, buffer_c_);
auto sliced_buffer_b = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_b_));
auto sliced_buffer_c = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_c_));
sliced_buffer_b.Slice({5, 5});
sliced_buffer_b.UpdateInclusiveSliceStartTimes({25, 30});
auto chunks = heap_.FindChunkCandidates(sliced_buffer_b);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(15, 5)));
    heap_.CommitChunk(
        BufferInterval{buffer_b_, 5, 25, 30, {}, /*need_allocation=*/true},
        Chunk::FromOffsetSize(10, 5));
    heap_.CommitChunk(
        BufferInterval{buffer_b_, 10, 30, 35, {buffer_c_},
                       /*need_allocation=*/true},
        Chunk::FromOffsetSize(10, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15)))));
EXPECT_EQ(heap_.heap_size(), 25);
}
{
heap_.MakeBufferInterval(buffer_d_, 5, 25, 35);
auto sliced_buffer_d = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_d_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_d);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5)));
heap_.CommitChunk(sliced_buffer_d.full_buffer_interval(),
Chunk::FromOffsetSize(0, 5));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(buffer_d_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 5)))));
EXPECT_EQ(heap_.heap_size(), 25);
}
{
heap_.MakeBufferInterval(buffer_e_, 10, 30, 35);
auto sliced_buffer_e = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_e_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_e);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(20, 10)));
heap_.CommitChunk(sliced_buffer_e.full_buffer_interval(),
Chunk::FromOffsetSize(20, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(
buffer_d_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5))),
::testing::Pair(buffer_e_, ::testing::ElementsAre(
Chunk::FromOffsetSize(20, 10)))));
EXPECT_EQ(heap_.heap_size(), 30);
}
{
heap_.MakeBufferInterval(buffer_f_, 10, 25, 29);
auto sliced_buffer_f = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_f_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_f);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(15, 10)));
heap_.CommitChunk(sliced_buffer_f.full_buffer_interval(),
Chunk::FromOffsetSize(15, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(
buffer_d_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5))),
::testing::Pair(buffer_e_, ::testing::ElementsAre(
Chunk::FromOffsetSize(20, 10))),
::testing::Pair(buffer_f_, ::testing::ElementsAre(
Chunk::FromOffsetSize(15, 10)))));
EXPECT_EQ(heap_.heap_size(), 30);
}
}
class ConstrainedGlobalDecreasingSizeBestFitHeapTest
: public HeapAlgorithmTestBase {};
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest, DecreasingSize) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(/*size_limit_per_heap=*/50,
                                                  /*alignment=*/1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.Alloc(buffer_c_, 20);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 30);
heap.Free(buffer_c_, 20);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(100, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_d_));
EXPECT_EQ(10, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_d_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_d_).offset);
}
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest,
DecreasingSizeWithAlignment) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(/*size_limit_per_heap=*/70,
                                                  /*alignment=*/20);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 50);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 50);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(130, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_c_));
EXPECT_EQ(10, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(50, result.heap_results[0].chunk_map.at(buffer_c_).size);
EXPECT_EQ(60, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_c_).offset);
}
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest, ColocatedII) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(/*size_limit_per_heap=*/50,
                                                  /*alignment=*/20);
heap.Alloc(buffer_a_, 30);
heap.Free(buffer_a_, 30);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(60, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_c_));
EXPECT_EQ(30, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_c_).offset);
}
class IntervalTreeTest : public ::testing::Test {};
TEST_F(IntervalTreeTest, InsertAndRemove) {
HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(1, 2, chunk);
EXPECT_TRUE(tree.Remove(1, 2, chunk));
EXPECT_FALSE(tree.Remove(1, 2, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
tree.Add(1, 2, chunk);
EXPECT_TRUE(tree.Remove(1, 2, chunk));
EXPECT_FALSE(tree.Remove(1, 2, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, InsertAndRemoveTwoLevelsLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(1, 45, chunk);
EXPECT_TRUE(tree.Remove(1, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, InsertAndRemoveTwoLevelsRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, TwoLevelsRight_RootFirst) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 45);
EXPECT_EQ(tree.GetRoot()->start, 21);
EXPECT_EQ(tree.GetRoot()->end, 45);
EXPECT_EQ(tree.GetRoot()->left, nullptr);
EXPECT_EQ(tree.GetRoot()->right, nullptr);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, TwoLevelsLeft_RootFirst) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(1, 45, chunk);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 45);
EXPECT_EQ(tree.GetRoot()->start, 1);
EXPECT_EQ(tree.GetRoot()->end, 45);
EXPECT_EQ(tree.GetRoot()->left, nullptr);
EXPECT_EQ(tree.GetRoot()->right, nullptr);
EXPECT_TRUE(tree.Remove(1, 45, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
tree.Add(22, 40, chunk);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(22, 40, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsLeftLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(10, 45, chunk);
tree.Add(1, 40, chunk);
EXPECT_TRUE(tree.Remove(10, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(1, 40, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsLeftRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(10, 45, chunk);
tree.Add(15, 40, chunk);
EXPECT_TRUE(tree.Remove(10, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(15, 40, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRightLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(25, 45, chunk);
tree.Add(22, 40, chunk);
EXPECT_TRUE(tree.Remove(25, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(22, 40, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRightLeftChunkDifferent) {
HeapSimulator::Chunk chunk1 = HeapSimulator::Chunk::FromOffsetSize(1, 2);
HeapSimulator::Chunk chunk2 = HeapSimulator::Chunk::FromOffsetSize(2, 3);
HeapSimulator::Chunk chunk3 = HeapSimulator::Chunk::FromOffsetSize(3, 4);
BufferIntervalTree tree;
tree.Add(20, 36, chunk1);
tree.Add(25, 45, chunk2);
tree.Add(22, 40, chunk3);
EXPECT_TRUE(tree.Remove(25, 45, chunk2));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_EQ(tree.GetRoot()->chunk.offset, 1);
EXPECT_EQ(tree.GetRoot()->chunk.size, 2);
EXPECT_TRUE(tree.Remove(20, 36, chunk1));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_EQ(tree.GetRoot()->chunk.offset, 3);
EXPECT_EQ(tree.GetRoot()->chunk.size, 4);
EXPECT_TRUE(tree.Remove(22, 40, chunk3));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArt) {
BufferIntervalTree tree;
tree.Add(15, 25, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(15, 19, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(
      /*start=*/18, /*end=*/23, /*group_size=*/3);
EXPECT_THAT(output, HasSubstr("Memory map for time: [18,23], "
"memory_block_size: 16, group_size: 3"));
EXPECT_THAT(output, HasSubstr("..# ##. 64"));
EXPECT_THAT(output, HasSubstr("### ##. 48"));
EXPECT_THAT(output, HasSubstr("##. ... 32"));
EXPECT_THAT(output, HasSubstr("### ### 16"));
EXPECT_THAT(output, HasSubstr("890 123"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArtTooLarge) {
BufferIntervalTree tree;
tree.Add(0, 4, HeapSimulator::Chunk::FromOffsetEnd(0, 128));
tree.Add(5, 10, HeapSimulator::Chunk::FromOffsetEnd(1, 129));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(
      /*start=*/0, /*end=*/10, /*group_size=*/3);
EXPECT_THAT(
output,
HasSubstr(
"Cannot print memory usage to ASCII art. Printing nodes instead!"));
EXPECT_THAT(output, HasSubstr("start: 0 end: 4 chunk: [0,128)"));
EXPECT_THAT(output, HasSubstr("start: 5 end: 10 chunk: [1,129)"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArtFreeMemory) {
BufferIntervalTree tree;
tree.Add(5, 10, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(
      /*start=*/0, /*end=*/4, /*group_size=*/10);
EXPECT_THAT(output, StrEq("No nodes overlapping in time. Memory is free!"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeMemoryUsedInInterval) {
BufferIntervalTree tree;
tree.Add(15, 25, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(15, 19, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
  std::vector<int64_t> memory_used_by_time =
      tree.MemoryUsedInInterval(/*start=*/18, /*end=*/23);
std::vector<int64_t> expected_memory_used_by_time = {48, 48, 48, 48, 48, 16};
EXPECT_THAT(memory_used_by_time, ContainerEq(expected_memory_used_by_time));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeHeapSize) {
BufferIntervalTree tree;
tree.Add(15, 26, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(17, 24, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
EXPECT_THAT(tree.HeapSizeInInterval(15, 16), 16);
EXPECT_THAT(tree.HeapSizeInInterval(15, 19), 48);
EXPECT_THAT(tree.HeapSizeInInterval(15, 22), 64);
EXPECT_THAT(tree.HeapSizeInInterval(23, 24), 48);
EXPECT_THAT(tree.HeapSizeInInterval(25, 26), 16);
}
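// Tests for SlicedBufferInterval: slicing a buffer interval and updating the
// per-slice start times and the end time.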
class SlicedBufferIntervalTest : public ::testing::Test {
public:
using HeapTy = GlobalDecreasingSizeBestFitHeap<HloValue>;
using ColocationTy = absl::InlinedVector<const HloValue*, 2>;
static std::tuple<const HloValue*, int64_t, int64_t, int64_t,
const ColocationTy&, bool>
BufferIntervalToTuple(const HeapTy::BufferInterval& buffer_interval) {
return std::make_tuple(buffer_interval.buffer, buffer_interval.size,
buffer_interval.start, buffer_interval.end,
std::ref(buffer_interval.colocations),
buffer_interval.need_allocation);
}
SlicedBufferIntervalTest() {
HloModuleConfig config;
module_ = std::make_unique<HloModule>("TestModule", config);
Shape f32vec4 = ShapeUtil::MakeShape(F32, {4});
auto builder = HloComputation::Builder("TestComputation");
auto p0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4, "p0"));
auto p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4, "p1"));
builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4, HloOpcode::kAdd, p0, p1));
module_->AddEntryComputation(builder.Build());
p0_value_ = std::make_unique<HloValue>(0, p0, ShapeIndex{});
p1_value_ = std::make_unique<HloValue>(0, p1, ShapeIndex{});
full_buffer_interval_ = HeapTy::BufferInterval({
p0_value_.get(),
20,
100,
200,
{p1_value_.get()},
true,
});
sliced_buffer_interval_ = std::make_unique<HeapTy::SlicedBufferInterval>(
HeapTy::SlicedBufferInterval::CreateConstInterval(
full_buffer_interval_));
mutable_sliced_buffer_interval_ =
std::make_unique<HeapTy::SlicedBufferInterval>(
HeapTy::SlicedBufferInterval::CreateMutableInterval(
full_buffer_interval_));
}
protected:
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloValue> p0_value_;
std::unique_ptr<HloValue> p1_value_;
HeapTy::BufferInterval full_buffer_interval_;
std::unique_ptr<const HeapTy::SlicedBufferInterval> sliced_buffer_interval_;
std::unique_ptr<HeapTy::SlicedBufferInterval> mutable_sliced_buffer_interval_;
};
TEST_F(SlicedBufferIntervalTest, NoSlices) {
EXPECT_EQ(
BufferIntervalToTuple(sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple(full_buffer_interval_));
EXPECT_EQ(sliced_buffer_interval_->num_slices(), 1);
EXPECT_THAT(sliced_buffer_interval_->SliceSizesSortedByOffset(),
::testing::ElementsAre(20));
EXPECT_EQ(BufferIntervalToTuple(
sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(full_buffer_interval_));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple(full_buffer_interval_));
}
TEST_F(SlicedBufferIntervalTest, Sliced) {
std::vector<int64_t> slice_sizes = {4, 5, 5, 6};
mutable_sliced_buffer_interval_->Slice(absl::Span<int64_t>(slice_sizes));
EXPECT_EQ(mutable_sliced_buffer_interval_->num_slices(), 4);
EXPECT_THAT(mutable_sliced_buffer_interval_->SliceSizesSortedByOffset(),
::testing::ElementsAre(4, 5, 5, 6));
mutable_sliced_buffer_interval_->UpdateInclusiveSliceStartTimes(
{100, 125, 150, 175});
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 100, 124, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(1)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 125, 149, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 150, 174, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 175, 200,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 100, 200,
ColocationTy({p1_value_.get()}), true}));
mutable_sliced_buffer_interval_->UpdateExclusiveSliceStartTimes(
{100, 125, 150, 175});
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 101, 125, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(1)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 126, 150, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 151, 175, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 176, 200,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 101, 200,
ColocationTy({p1_value_.get()}), true}));
mutable_sliced_buffer_interval_->UpdateEndTime(300);
EXPECT_EQ(mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2).end,
175);
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 176, 300,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 101, 300,
ColocationTy({p1_value_.get()}), true}));
}
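// Tests for SlicedAllocationFinder, which searches per-slice-time free-chunk
// lists for offsets where a sliced allocation fits.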
class SlicedAllocationFinderTest : public ::testing::Test {
public:
using HeapTy = GlobalDecreasingSizeBestFitHeap<HloValue>;
using FreeChunks = typename HeapTy::FreeChunks;
using Chunk = HeapSimulator::Chunk;
using Finder = typename HeapTy::SlicedAllocationFinder;
protected:
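  // Builds an iterator over all permutations of slice times, with inclusive
  // start times 0 .. num_slices - 1.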
std::unique_ptr<SliceTimePermutationIterator> NewPermutationIterator(
int64_t num_slices) {
std::vector<int64_t> inclusive_start_times;
inclusive_start_times.reserve(num_slices);
for (int64_t start_time = 0; start_time < num_slices; ++start_time) {
inclusive_start_times.push_back(start_time);
}
return SliceTimePermutationIterator::CreateForNewAllocation(
SliceTimePermutationIterator::Ty::kAll, inclusive_start_times);
}
};
TEST_F(SlicedAllocationFinderTest, NoSlices) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(45, 3),
Chunk::FromOffsetSize(48, 0)));
}
TEST_F(SlicedAllocationFinderTest, NoSlicesLargerMaxColloc) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = 6;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(60, 3),
Chunk::FromOffsetSize(63, 3)));
}
TEST_F(SlicedAllocationFinderTest, NoSlicesSmallestTie) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 13},
{15, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(10, 3),
Chunk::FromOffsetSize(13, 0)));
}
TEST_F(SlicedAllocationFinderTest, LeftHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, MiddleHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, ManyHoles) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 31},
{39, 42},
{46, 51},
{54, 60},
{62, 64},
},
{
{5, 31},
{38, 44},
{46, 51},
{54, 59},
{62, 64},
},
{
{5, 31},
{36, 59},
{62, 64},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(40, 3),
Chunk::FromOffsetSize(43, 3), Chunk::FromOffsetSize(49, 0)));
}
TEST_F(SlicedAllocationFinderTest, EarlySliceTimesHaveLargeFreeChunks) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{6, 68},
},
{
{5, 25},
{28, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, DifferentSliceSizes1) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{46, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{46, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{42, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {5, 3, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(47, 3), Chunk::FromOffsetSize(50, 4),
Chunk::FromOffsetSize(42, 5), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, DifferentSliceSizes2) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{46, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{46, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{42, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {5, 3, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5), Chunk::FromOffsetSize(15, 3),
Chunk::FromOffsetSize(18, 4), Chunk::FromOffsetSize(22, 0)));
}
TEST_F(SlicedAllocationFinderTest, ZeroSizeFreeChunk) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 5},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 45},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 0)));
}
TEST_F(SlicedAllocationFinderTest, LargerMaxColloc) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = 10;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 1)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetFit) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 20;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(20, 3), Chunk::FromOffsetSize(23, 3),
Chunk::FromOffsetSize(26, 3), Chunk::FromOffsetSize(29, 0)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetNoFit) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 35;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, Misaligned) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{47, 53},
{60, 70},
},
{
{5, 7},
{10, 40},
{47, 57},
{60, 70},
},
{
{5, 7},
{10, 40},
{43, 57},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {4, 4, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 2;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 4), Chunk::FromOffsetSize(52, 4),
Chunk::FromOffsetSize(44, 4), Chunk::FromOffsetSize(56, 0)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetMisaligned) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{47, 53},
{60, 70},
},
{
{5, 7},
{10, 40},
{47, 57},
{60, 70},
},
{
{5, 7},
{10, 40},
{43, 57},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {4, 4, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 21;
int64_t alignment = 2;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 4), Chunk::FromOffsetSize(52, 4),
Chunk::FromOffsetSize(44, 4), Chunk::FromOffsetSize(56, 0)));
}
TEST_F(SlicedAllocationFinderTest, CorrectInitialization1) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 11},
{15, 21},
},
{
{5, 11},
{25, 31},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(5, 3),
Chunk::FromOffsetSize(8, 3),
Chunk::FromOffsetSize(11, 0)));
}
TEST_F(SlicedAllocationFinderTest, CorrectInitialization2) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 16},
{20, 26},
{40, 43},
},
{
{5, 16},
{26, 32},
{42, 45},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(5, 3),
Chunk::FromOffsetSize(8, 3),
Chunk::FromOffsetSize(11, 0)));
}
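// The tests below pass an extra predicate on candidate start offsets to the
// finder and verify that disallowed offsets are skipped, either by shifting
// within the same hole or by falling back to a different hole entirely.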
TEST_F(SlicedAllocationFinderTest, LeftHoleNotAllowedToStartAtFirstOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 52},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(55, 0)));
}
TEST_F(SlicedAllocationFinderTest, LeftHoleAllowedToIncludeNoStartOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 46; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHoleNotAllowedToStartAtFirstOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 55},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 55},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(55, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHoleNotAllowedOffsetsFindsNewHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 0)));
}
TEST_F(SlicedAllocationFinderTest, FindForOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 52},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.FindForOffset(10),
::testing::ElementsAre(
Chunk::FromOffsetSize(10, 3), Chunk::FromOffsetSize(13, 3),
Chunk::FromOffsetSize(16, 3), Chunk::FromOffsetSize(19, 0)));
EXPECT_THAT(finder.FindForOffset(20),
::testing::ElementsAre(
Chunk::FromOffsetSize(20, 3), Chunk::FromOffsetSize(23, 3),
Chunk::FromOffsetSize(26, 3), Chunk::FromOffsetSize(29, 0)));
EXPECT_THAT(finder.FindForOffset(45),
::testing::IsEmpty());
EXPECT_THAT(finder.FindForOffset(46),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(55, 0)));
EXPECT_THAT(finder.FindForOffset(59),
::testing::IsEmpty());
EXPECT_THAT(finder.FindForOffset(61),
::testing::ElementsAre(
Chunk::FromOffsetSize(61, 3), Chunk::FromOffsetSize(64, 3),
Chunk::FromOffsetSize(67, 3), Chunk::FromOffsetSize(70, 0)));
}
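// Tests for SliceTimePermutationIterator. Each case enumerates the expected
// slice-time permutations, either for a new allocation (driven by inclusive
// start times) or for a repack (driven by the original slice data), for both
// the kAll and kPreferred iterator types.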
class SliceTimePermutationIteratorTest : public ::testing::Test {
protected:
struct NewAllocationTestCase {
void Test() const {
auto iterator = SliceTimePermutationIterator::CreateForNewAllocation(
ty, inclusive_start_times);
for (int i = 0; i < 5; ++i) {
VLOG(2) << "Test case try #" << i << ": NewAllocation, " << name;
EXPECT_THAT(GetPermutations(iterator.get()),
::testing::ElementsAreArray(expected_permutations))
<< "Failed NewAllocation, " << name;
}
}
std::string name;
SliceTimePermutationIterator::Ty ty;
std::vector<int64_t> inclusive_start_times;
std::vector<std::vector<int64_t>> expected_permutations;
};
struct RepackTestCase {
void Test() const {
auto iterator = SliceTimePermutationIterator::CreateForRepack(
ty, (original_slice_data.has_value() ? &(*original_slice_data)
: nullptr));
for (int i = 0; i < 5; ++i) {
VLOG(2) << "Test case try #" << i << ": Repack, " << name;
EXPECT_THAT(GetPermutations(iterator.get()),
::testing::ElementsAreArray(expected_permutations))
<< "Failed Repack, " << name;
}
}
std::string name;
SliceTimePermutationIterator::Ty ty;
std::optional<SlicedAllocationData> original_slice_data;
std::vector<std::vector<int64_t>> expected_permutations;
};
static std::vector<std::vector<int64_t>> GetPermutations(
SliceTimePermutationIterator* it) {
std::vector<std::vector<int64_t>> results;
for (it->Begin(); !it->Done(); it->Next()) {
absl::Span<const int64_t> permutation = it->Get();
results.push_back(
std::vector<int64_t>(permutation.begin(), permutation.end()));
}
return results;
}
};
TEST_F(SliceTimePermutationIteratorTest, NewAllocations) {
std::vector<NewAllocationTestCase> test_cases = {
{
"0 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{},
{},
},
{
"1 slice, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{0},
{{0}},
},
{
"2 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{10, 20},
{{0, 1}, {1, 0}},
},
{
"many slices, all permutations, unique start times",
SliceTimePermutationIterator::Ty::kAll,
{40, 10, 450},
{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}},
},
{
"many slices, all permutations, non-unique start times",
SliceTimePermutationIterator::Ty::kAll,
{40, 10, 450, 10},
{
{0, 1, 2, 3},
{0, 1, 3, 2},
{0, 2, 1, 3},
{0, 2, 3, 1},
{0, 3, 1, 2},
{0, 3, 2, 1},
{2, 0, 1, 3},
{2, 0, 3, 1},
{2, 3, 0, 1},
{3, 0, 1, 2},
{3, 0, 2, 1},
{3, 2, 0, 1},
},
},
{
"0 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{},
{},
},
{
"1 slice, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{0},
{{0}},
},
{
"2 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{10, 20},
{{0, 1}, {1, 0}},
},
{
"many slices, preferred permutations, unique start times",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 10, 450, 12, 14},
{{0, 1, 2, 3, 4}, {4, 3, 2, 1, 0}, {3, 1, 0, 2, 4}},
},
{
"many slices, preferred permutations, non-unique start times 1",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 10, 450, 10},
{
{0, 1, 2, 3},
{3, 2, 1, 0},
{3, 1, 0, 2}},
},
{
"many slices, preferred permutations, non-unique start times 2",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 40},
{
{0, 1},
},
},
};
for (const NewAllocationTestCase& test_case : test_cases) {
test_case.Test();
}
}
TEST_F(SliceTimePermutationIteratorTest, Repacks) {
std::vector<RepackTestCase> test_cases = {
{
"no slice data, all permutations",
SliceTimePermutationIterator::Ty::kAll,
std::nullopt,
{{0}},
},
{
"0 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{},
{},
},
{
"1 slice, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
}},
{{0}},
},
{
"2 slices, uniform slice size, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
}},
{{0, 1}, {1, 0}},
},
{
"many slices, uniform slice size, unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{1, 3, 3},
}},
{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}},
},
{
"many slices, non-uniform slice size, unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{2, 2, 3},
{1, 3, 2},
}},
{
{0, 2, 1},
{1, 2, 0},
},
},
{
"many slices, non-uniform slice size, non-unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{2, 3, 1},
{1, 5, 1},
{2, 6, 3},
{3, 8, 4},
}},
{
{0, 1, 2, 3, 4, 5},
{0, 1, 4, 3, 2, 5},
{0, 3, 1, 2, 4, 5},
{0, 3, 4, 1, 2, 5},
{3, 0, 1, 2, 4, 5},
{3, 0, 4, 1, 2, 5},
},
},
{
"no slice data, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
std::nullopt,
{{0}},
},
{
"0 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{},
{},
},
{
"1 slice, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
}},
{{0}},
},
{
"2 slices, uniform slice size, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
}},
{{0, 1}, {1, 0}},
},
{
"many slices, uniform slice size, unique start times, preferred "
"permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{1, 3, 3},
}},
{{0, 1, 2}, {2, 1, 0}, {1, 0, 2}},
},
{
"many slices, non-uniform slice size, unique start times, preferred "
"permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{2, 2, 3},
{1, 3, 2},
}},
{
{0, 2, 1},
{1, 2, 0},
},
},
{
"many slices, non-uniform slice size, non-unique start times, "
"preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{2, 3, 1},
{1, 5, 1},
{2, 6, 3},
{3, 8, 4},
}},
{
{0, 2, 1, 3, 4, 5},
{3, 2, 1, 0, 4, 5},
},
},
};
for (const RepackTestCase& test_case : test_cases) {
test_case.Test();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/heap_simulator/heap_simulator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/heap_simulator/heap_simulator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34bec701-5df1-4e9b-812d-9acf51c7d7b9 | cpp | tensorflow/tensorflow | graphcycles | third_party/xla/xla/service/graphcycles/graphcycles.cc | third_party/xla/xla/service/graphcycles/graphcycles_test.cc | #include "xla/service/graphcycles/graphcycles.h"
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/service/graphcycles/ordered_set.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using NodeSet = absl::flat_hash_set<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
struct Node {
int32_t rank;
bool visited;
};
struct NodeIO {
OrderedNodeSet in;
OrderedNodeSet out;
};
}
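// Internal representation: nodes_ stores each node's rank (its position in the
// maintained topological order) plus a visited scratch bit; node_io_ stores
// ordered in/out edge sets; free_nodes_ recycles ids of removed nodes;
// deltaf_, deltab_, list_, merged_ and stack_ are scratch buffers reused by
// the DFS and rank-update helpers; node_data_ holds opaque client pointers.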
struct GraphCycles::Rep {
std::vector<Node> nodes_;
std::vector<NodeIO> node_io_;
std::vector<int32_t> free_nodes_;
std::vector<int32_t> deltaf_;
std::vector<int32_t> deltab_;
std::vector<int32_t> list_;
std::vector<int32_t> merged_;
std::vector<int32_t> stack_;
std::vector<void*> node_data_;
};
GraphCycles::GraphCycles() : rep_(new Rep) {}
GraphCycles::~GraphCycles() {
delete rep_;
}
bool GraphCycles::CheckInvariants() const {
Rep* r = rep_;
NodeSet ranks;
for (size_t x = 0; x < r->nodes_.size(); x++) {
Node* nx = &r->nodes_[x];
if (nx->visited) {
LOG(FATAL) << "Did not clear visited marker on node " << x;
}
if (!ranks.insert(nx->rank).second) {
LOG(FATAL) << "Duplicate occurrence of rank " << nx->rank;
}
NodeIO* nx_io = &r->node_io_[x];
for (int32_t y : nx_io->out.GetSequence()) {
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
LOG(FATAL) << "Edge " << x << "->" << y << " has bad rank assignment "
<< nx->rank << "->" << ny->rank;
}
}
}
return true;
}
int32_t GraphCycles::NewNode() {
if (rep_->free_nodes_.empty()) {
Node n;
n.visited = false;
n.rank = rep_->nodes_.size();
rep_->nodes_.emplace_back(n);
rep_->node_io_.emplace_back();
rep_->node_data_.push_back(nullptr);
return n.rank;
} else {
int32_t r = rep_->free_nodes_.back();
rep_->free_nodes_.pop_back();
rep_->node_data_[r] = nullptr;
return r;
}
}
void GraphCycles::RemoveNode(int32_t node) {
NodeIO* x = &rep_->node_io_[node];
for (int32_t y : x->out.GetSequence()) {
rep_->node_io_[y].in.Erase(node);
}
for (int32_t y : x->in.GetSequence()) {
rep_->node_io_[y].out.Erase(node);
}
x->in.Clear();
x->out.Clear();
rep_->free_nodes_.push_back(node);
}
void* GraphCycles::GetNodeData(int32_t node) const {
return rep_->node_data_[node];
}
void GraphCycles::SetNodeData(int32_t node, void* data) {
rep_->node_data_[node] = data;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->node_io_[x].out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->node_io_[x].out.Erase(y);
rep_->node_io_[y].in.Erase(x);
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(absl::Span<const Node>, std::vector<int32_t>* delta);
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst);
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices);
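// InsertEdge keeps nodes_ in a valid topological order. If the new edge x->y
// already respects the order (rank[x] <= rank[y]) no ranks change. Otherwise a
// forward DFS from y (bounded by rank[x]) and a backward DFS from x (bounded
// by rank[y]) collect the affected nodes and Reorder() re-ranks them. If the
// forward DFS reaches a node whose rank equals rank[x] (i.e. x itself), the
// edge would close a cycle, so it is removed again and false is returned.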
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
NodeIO* nx_io = &r->node_io_[x];
if (!nx_io->out.Insert(y)) {
return true;
}
NodeIO* ny_io = &r->node_io_[y];
ny_io->in.Insert(x);
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank <= ny->rank) {
return true;
}
if (!ForwardDFS(r, y, nx->rank)) {
nx_io->out.Erase(y);
ny_io->in.Erase(x);
ClearVisitedBits(r, r->deltaf_);
return false;
}
BackwardDFS(r, x, ny->rank);
Reorder(r);
return true;
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
r->deltaf_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->out.GetSequence()) {
Node* nw = &r->nodes_[w];
if (nw->rank == upper_bound) {
return false;
}
if (!nw->visited && nw->rank < upper_bound) {
r->stack_.push_back(w);
}
}
}
return true;
}
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
r->deltab_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->in.GetSequence()) {
Node* nw = &r->nodes_[w];
if (!nw->visited && lower_bound < nw->rank) {
r->stack_.push_back(w);
}
}
}
}
static void Reorder(GraphCycles::Rep* r) {
Sort(r->nodes_, &r->deltab_);
Sort(r->nodes_, &r->deltaf_);
r->list_.clear();
MoveToList(r, &r->deltab_, &r->list_);
MoveToList(r, &r->deltaf_, &r->list_);
r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
std::merge(r->deltab_.begin(), r->deltab_.end(), r->deltaf_.begin(),
r->deltaf_.end(), r->merged_.begin());
for (size_t i = 0; i < r->list_.size(); i++) {
r->nodes_[r->list_[i]].rank = r->merged_[i];
}
}
static void Sort(absl::Span<const Node> nodes, std::vector<int32_t>* delta) {
std::sort(delta->begin(), delta->end(), [&](int32_t a, int32_t b) {
return nodes[a].rank < nodes[b].rank;
});
}
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst) {
for (size_t i = 0; i < src->size(); i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes_[w].rank;
r->nodes_[w].visited = false;
dst->push_back(w);
}
}
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices) {
for (auto index : visited_indices) {
r->nodes_[index].visited = false;
}
}
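// FindPath runs an iterative DFS from x, using a -1 sentinel on the stack to
// pop the current path entry when backtracking. It returns the length of the
// first path found to y (0 if unreachable) and writes at most max_path_len
// node ids into `path`.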
int GraphCycles::FindPath(int32_t x, int32_t y, int max_path_len,
int32_t path[]) const {
int path_len = 0;
Rep* r = rep_;
NodeSet seen;
r->stack_.clear();
r->stack_.push_back(x);
while (!r->stack_.empty()) {
int32_t n = r->stack_.back();
r->stack_.pop_back();
if (n < 0) {
path_len--;
continue;
}
if (path_len < max_path_len) {
path[path_len] = n;
}
path_len++;
r->stack_.push_back(-1);
if (n == y) {
return path_len;
}
for (auto w : r->node_io_[n].out.GetSequence()) {
if (seen.insert(w).second) {
r->stack_.push_back(w);
}
}
}
return 0;
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) const {
return FindPath(x, y, 0, nullptr) > 0;
}
bool GraphCycles::IsReachableNonConst(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = !ForwardDFS(r, x, ny->rank);
ClearVisitedBits(r, r->deltaf_);
return reachable;
}
bool GraphCycles::CanContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b)) << "No edge exists from " << a << " to " << b;
RemoveEdge(a, b);
bool reachable = IsReachableNonConst(a, b);
InsertEdge(a, b);
return !reachable;
}
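// ContractEdge merges the endpoints of edge a->b into one node and returns the
// surviving node id, or std::nullopt if another a->b path exists (so merging
// would create a cycle). The endpoint with fewer edges is absorbed into the
// other to keep the rewiring cheap.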
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachableNonConst(a, b)) {
InsertEdge(a, b);
return std::nullopt;
}
if (rep_->node_io_[b].in.Size() + rep_->node_io_[b].out.Size() >
rep_->node_io_[a].in.Size() + rep_->node_io_[a].out.Size()) {
std::swap(a, b);
}
NodeIO* nb_io = &rep_->node_io_[b];
OrderedNodeSet out = std::move(nb_io->out);
OrderedNodeSet in = std::move(nb_io->in);
for (int32_t y : out.GetSequence()) {
rep_->node_io_[y].in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->node_io_[y].out.Erase(b);
}
rep_->free_nodes_.push_back(b);
rep_->node_io_[a].out.Reserve(rep_->node_io_[a].out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->node_io_[a].in.Reserve(rep_->node_io_[a].in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
absl::Span<const int32_t> GraphCycles::Successors(int32_t node) const {
return rep_->node_io_[node].out.GetSequence();
}
absl::Span<const int32_t> GraphCycles::Predecessors(int32_t node) const {
return rep_->node_io_[node].in.GetSequence();
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
absl::Span<const int32_t> successors = Successors(node);
return std::vector<int32_t>(successors.begin(), successors.end());
}
std::vector<int32_t> GraphCycles::PredecessorsCopy(int32_t node) const {
absl::Span<const int32_t> predecessors = Predecessors(node);
return std::vector<int32_t>(predecessors.begin(), predecessors.end());
}
namespace {
void SortInPostOrder(absl::Span<const Node> nodes,
std::vector<int32_t>* to_sort) {
absl::c_sort(*to_sort, [&](int32_t a, int32_t b) {
DCHECK(a == b || nodes[a].rank != nodes[b].rank);
return nodes[a].rank > nodes[b].rank;
});
}
}
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
absl::flat_hash_set<int32_t> free_nodes_set;
absl::c_copy(rep_->free_nodes_,
std::inserter(free_nodes_set, free_nodes_set.begin()));
std::vector<int32_t> all_nodes;
all_nodes.reserve(rep_->nodes_.size() - free_nodes_set.size());
for (int64_t i = 0, e = rep_->nodes_.size(); i < e; i++) {
if (!free_nodes_set.contains(i)) {
all_nodes.push_back(i);
}
}
SortInPostOrder(rep_->nodes_, &all_nodes);
return all_nodes;
}
std::string GraphCycles::DebugString() const {
absl::flat_hash_set<int32_t> free_nodes_set(rep_->free_nodes_.begin(),
rep_->free_nodes_.end());
std::string result = "digraph {\n";
for (int i = 0, end = rep_->nodes_.size(); i < end; i++) {
if (free_nodes_set.contains(i)) {
continue;
}
for (int32_t succ : rep_->node_io_[i].out.GetSequence()) {
absl::StrAppend(&result, " \"", i, "\" -> \"", succ, "\"\n");
}
}
absl::StrAppend(&result, "}\n");
return result;
}
} | #include "xla/service/graphcycles/graphcycles.h"
#include <cstdint>
#include <optional>
#include <random>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
typedef std::vector<int> Nodes;
struct Edge {
int from;
int to;
};
typedef std::vector<Edge> Edges;
static bool IsReachable(Edges *edges, int from, int to,
absl::flat_hash_set<int> *seen) {
seen->insert(from);
if (from == to) return true;
for (int i = 0; i != edges->size(); i++) {
Edge *edge = &(*edges)[i];
if (edge->from == from) {
if (edge->to == to) {
return true;
} else if (seen->find(edge->to) == seen->end() &&
IsReachable(edges, edge->to, to, seen)) {
return true;
}
}
}
return false;
}
static void PrintNodes(Nodes *nodes) {
LOG(INFO) << "NODES (" << nodes->size() << ")";
for (int i = 0; i != nodes->size(); i++) {
LOG(INFO) << (*nodes)[i];
}
}
static void PrintEdges(Edges *edges) {
LOG(INFO) << "EDGES (" << edges->size() << ")";
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
LOG(INFO) << a << " " << b;
}
LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, xla::GraphCycles *gc) {
LOG(INFO) << "GC EDGES";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges,
xla::GraphCycles *gc) {
LOG(INFO) << "Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
absl::flat_hash_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes, xla::GraphCycles *gc) {
LOG(INFO) << "GC Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->IsReachable(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges,
xla::GraphCycles *gc) {
absl::flat_hash_set<int> seen;
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
seen.clear();
int a = (*nodes)[i];
int b = (*nodes)[j];
bool gc_reachable = gc->IsReachable(a, b);
CHECK_EQ(gc_reachable, gc->IsReachableNonConst(a, b));
bool reachable = IsReachable(edges, a, b, &seen);
if (gc_reachable != reachable) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
PrintTransitiveClosure(nodes, edges, gc);
PrintGCTransitiveClosure(nodes, gc);
LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
<< reachable << " a " << a << " b " << b;
}
}
}
}
static void CheckEdges(Nodes *nodes, Edges *edges, xla::GraphCycles *gc) {
int count = 0;
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
if (!gc->HasEdge(a, b)) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
count++;
}
}
}
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static int RandomNode(std::mt19937 *rnd, Nodes *nodes) {
std::uniform_int_distribution<int> distribution(0, nodes->size() - 1);
return distribution(*rnd);
}
static int RandomEdge(std::mt19937 *rnd, Edges *edges) {
std::uniform_int_distribution<int> distribution(0, edges->size() - 1);
return distribution(*rnd);
}
static int EdgeIndex(Edges *edges, int from, int to) {
int i = 0;
while (i != edges->size() &&
((*edges)[i].from != from || (*edges)[i].to != to)) {
i++;
}
return i == edges->size() ? -1 : i;
}
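// Randomized stress test: every mutation applied to GraphCycles is mirrored in
// a plain edge list, and HasEdge, reachability, FindPath and the internal
// invariants are cross-checked against brute-force recomputation on that list.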
TEST(GraphCycles, RandomizedTest) {
Nodes nodes;
Edges edges;
xla::GraphCycles graph_cycles;
static const int kMaxNodes = 7;
static const int kDataOffset = 17;
int n = 100000;
int op = 0;
std::mt19937 rnd(tsl::testing::RandomSeed() + 1);
for (int iter = 0; iter != n; iter++) {
if ((iter % 10000) == 0) VLOG(0) << "Iter " << iter << " of " << n;
if (VLOG_IS_ON(3)) {
LOG(INFO) << "===============";
LOG(INFO) << "last op " << op;
PrintNodes(&nodes);
PrintEdges(&edges);
PrintGCEdges(&nodes, &graph_cycles);
}
for (int i = 0; i != nodes.size(); i++) {
ASSERT_EQ(reinterpret_cast<intptr_t>(graph_cycles.GetNodeData(i)),
i + kDataOffset)
<< " node " << i;
}
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
std::uniform_int_distribution<int> distribution(0, 5);
op = distribution(rnd);
switch (op) {
case 0:
if (nodes.size() < kMaxNodes) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
ASSERT_GE(new_node, 0);
for (int i = 0; i != nodes.size(); i++) {
ASSERT_NE(nodes[i], new_node);
}
nodes.push_back(new_node);
}
break;
case 1:
if (!nodes.empty()) {
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int i = 0;
while (i != edges.size()) {
if (edges[i].from == node || edges[i].to == node) {
edges[i] = edges.back();
edges.pop_back();
} else {
i++;
}
}
}
break;
case 2:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
if (graph_cycles.InsertEdge(nodes[from], nodes[to])) {
Edge new_edge;
new_edge.from = nodes[from];
new_edge.to = nodes[to];
edges.push_back(new_edge);
} else {
absl::flat_hash_set<int> seen;
ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
<< "Edge " << nodes[to] << "->" << nodes[from];
}
}
}
break;
case 3:
if (!edges.empty()) {
int i = RandomEdge(&rnd, &edges);
int from = edges[i].from;
int to = edges[i].to;
ASSERT_EQ(i, EdgeIndex(&edges, from, to));
edges[i] = edges.back();
edges.pop_back();
ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
VLOG(1) << "removing edge " << from << " " << to;
graph_cycles.RemoveEdge(from, to);
}
break;
case 4:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
int32_t path[2 * kMaxNodes];
int path_len = graph_cycles.FindPath(nodes[from], nodes[to],
2 * kMaxNodes, path);
absl::flat_hash_set<int> seen;
bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
bool gc_reachable = graph_cycles.IsReachable(nodes[from], nodes[to]);
ASSERT_EQ(gc_reachable,
graph_cycles.IsReachableNonConst(nodes[from], nodes[to]));
ASSERT_EQ(path_len != 0, reachable);
ASSERT_EQ(path_len != 0, gc_reachable);
ASSERT_LE(path_len, kMaxNodes + 1);
if (path_len != 0) {
ASSERT_EQ(nodes[from], path[0]);
ASSERT_EQ(nodes[to], path[path_len - 1]);
for (int i = 1; i < path_len; i++) {
ASSERT_NE(-1, EdgeIndex(&edges, path[i - 1], path[i]));
ASSERT_TRUE(graph_cycles.HasEdge(path[i - 1], path[i]));
}
}
}
break;
case 5:
CHECK(graph_cycles.CheckInvariants());
break;
default:
LOG(FATAL);
}
std::bernoulli_distribution rarely(1.0 / 1024.0);
if (rarely(rnd)) {
VLOG(3) << "Graph expansion";
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
for (int i = 0; i != 256; i++) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_GE(new_node, 0);
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
for (int j = 0; j != nodes.size(); j++) {
ASSERT_NE(nodes[j], new_node);
}
nodes.push_back(new_node);
}
for (int i = 0; i != 256; i++) {
ASSERT_GT(nodes.size(), 0);
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int j = 0;
while (j != edges.size()) {
if (edges[j].from == node || edges[j].to == node) {
edges[j] = edges.back();
edges.pop_back();
} else {
j++;
}
}
}
CHECK(graph_cycles.CheckInvariants());
}
}
}
class GraphCyclesTest : public ::testing::Test {
public:
xla::GraphCycles g_;
GraphCyclesTest() {
for (int i = 0; i < 100; i++) {
CHECK_EQ(i, g_.NewNode());
}
CHECK(g_.CheckInvariants());
}
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
CHECK(g_.CheckInvariants());
}
std::string Path(int x, int y) {
static const int kPathSize = 5;
int32_t path[kPathSize];
int np = g_.FindPath(x, y, kPathSize, path);
std::string result;
for (int i = 0; i < np; i++) {
if (i >= kPathSize) {
result += " ...";
break;
}
if (!result.empty()) result.push_back(' ');
char buf[20];
snprintf(buf, sizeof(buf), "%d", path[i]);
result += buf;
}
return result;
}
};
TEST_F(GraphCyclesTest, NoCycle) {
AddMultiples();
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
EXPECT_EQ("4 8", Path(4, 8));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
CHECK(g_.CheckInvariants());
EXPECT_FALSE(AddEdge(9, 2));
EXPECT_EQ("2 4 8 16 9", Path(2, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, LongPath) {
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(4, 6));
ASSERT_TRUE(AddEdge(6, 8));
ASSERT_TRUE(AddEdge(8, 10));
ASSERT_TRUE(AddEdge(10, 12));
ASSERT_FALSE(AddEdge(12, 2));
EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, RemoveNode) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(3, 4));
ASSERT_TRUE(AddEdge(4, 5));
g_.RemoveNode(3);
ASSERT_TRUE(AddEdge(5, 1));
}
TEST_F(GraphCyclesTest, ManyEdges) {
const int N = 50;
for (int i = 0; i < N; i++) {
for (int j = 1; j < N; j++) {
ASSERT_TRUE(AddEdge(i, i + j));
}
}
CHECK(g_.CheckInvariants());
ASSERT_TRUE(AddEdge(2 * N - 1, 0));
CHECK(g_.CheckInvariants());
ASSERT_FALSE(AddEdge(10, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(g_.ContractEdge(1, 2).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(g_.ContractEdge(2, 3).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 4));
}
TEST_F(GraphCyclesTest, CanContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.CanContractEdge(1, 3));
EXPECT_FALSE(g_.CanContractEdge(2, 4));
EXPECT_TRUE(g_.CanContractEdge(1, 2));
EXPECT_TRUE(g_.CanContractEdge(2, 3));
EXPECT_TRUE(g_.CanContractEdge(3, 4));
}
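// The benchmarks below measure bulk node/edge insertion, repeated contraction
// of edges into a single node, and IsReachableNonConst queries on a randomly
// wired DAG.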
static void BM_StressTest(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
xla::GraphCycles g;
int32_t *nodes = new int32_t[num_nodes];
for (int i = 0; i < num_nodes; i++) {
nodes[i] = g.NewNode();
}
for (int i = 0; i < num_nodes; i++) {
int end = std::min(num_nodes, i + 5);
for (int j = i + 1; j < end; j++) {
if (nodes[i] >= 0 && nodes[j] >= 0) {
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
}
delete[] nodes;
}
}
BENCHMARK(BM_StressTest)->Range(2048, 1048576);
static void BM_ContractEdge(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
state.PauseTiming();
xla::GraphCycles g;
std::vector<int32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
for (int i = 0; i < num_nodes - 1; ++i) {
g.InsertEdge(nodes[i], nodes[num_nodes - 1]);
}
state.ResumeTiming();
int node = num_nodes - 1;
for (int i = 0; i < num_nodes - 1; ++i) {
node = g.ContractEdge(nodes[i], node).value();
}
}
}
BENCHMARK(BM_ContractEdge)->Arg(1000)->Arg(10000);
static void BM_IsReachableNonConst(testing::benchmark::State &state) {
const int num_nodes = state.range(0);
xla::GraphCycles g;
std::vector<uint32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
absl::BitGen bitgen;
for (int i = 0; i < num_nodes; i++) {
int max = num_nodes - 1 - i;
if (max == 0) break;
constexpr int branch_factor = 2;
for (int b = 0; b < branch_factor; b++) {
int j = i + 1 + absl::Uniform(bitgen, 0, max);
CHECK_LT(j, num_nodes);
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
auto get_random_node = [&]() {
return nodes[absl::Uniform(bitgen, 0, num_nodes)];
};
uint32_t src, dst;
int i = 0;
for (auto s : state) {
if (i % 256 == 0) {
src = get_random_node();
dst = get_random_node();
}
bool reachable = g.IsReachableNonConst(src, dst);
benchmark::DoNotOptimize(reachable);
i++;
}
}
BENCHMARK(BM_IsReachableNonConst)
->Arg(10)
->Arg(50)
->Arg(100)
->Arg(200)
->Arg(1000)
->Arg(30000); | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/graphcycles.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/graphcycles_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c5201c2-01d4-4a96-b235-4a452e3fa598 | cpp | tensorflow/tensorflow | semantic_version | third_party/xla/xla/stream_executor/semantic_version.cc | third_party/xla/xla/stream_executor/semantic_version_test.cc | #include "xla/stream_executor/semantic_version.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
std::string SemanticVersion::ToString() const {
return absl::StrFormat("%d.%d.%d", major_, minor_, patch_);
}
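// Helper that parses one dot-separated component; ParseFromString below splits
// the input on '.' and requires exactly three unsigned numeric components
// (major.minor.patch).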
static absl::StatusOr<unsigned> ParseUnsignedNumber(
absl::string_view component) {
unsigned number;
if (!absl::SimpleAtoi(component, &number)) {
return absl::InvalidArgumentError(
absl::StrFormat("'%s' is not an unsigned number.", component));
}
return number;
}
absl::StatusOr<SemanticVersion> SemanticVersion::ParseFromString(
absl::string_view str) {
std::vector<absl::string_view> components = absl::StrSplit(str, '.');
if (components.size() != 3) {
return absl::InvalidArgumentError(
"Version does not match the format X.Y.Z");
}
SemanticVersion result{0, 0, 0};
TF_ASSIGN_OR_RETURN(result.major(), ParseUnsignedNumber(components[0]));
TF_ASSIGN_OR_RETURN(result.minor(), ParseUnsignedNumber(components[1]));
TF_ASSIGN_OR_RETURN(result.patch(), ParseUnsignedNumber(components[2]));
return result;
}
} | #include "xla/stream_executor/semantic_version.h"
#include <algorithm>
#include <array>
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(SemanticVersion, Construction) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, ConstructionFromArray) {
SemanticVersion version{std::array<unsigned, 3>{1, 2, 3}};
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, Mutation) {
SemanticVersion version{0, 0, 0};
version.major() = 1;
version.minor() = 2;
version.patch() = 3;
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, ParseFromStringSuccess) {
absl::StatusOr<SemanticVersion> version =
SemanticVersion::ParseFromString("1.2.3");
ASSERT_THAT(version, tsl::testing::IsOk());
EXPECT_EQ(version->major(), 1);
EXPECT_EQ(version->minor(), 2);
EXPECT_EQ(version->patch(), 3);
}
TEST(SemanticVersion, ParseFromStringInvalid) {
auto test = [](absl::string_view str) {
absl::StatusOr<SemanticVersion> version =
SemanticVersion::ParseFromString(str);
EXPECT_THAT(version,
tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));
};
test("1.2");
test("1.2.3dev5");
}
TEST(SemanticVersion, ToString) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(version.ToString(), "1.2.3");
}
TEST(SemanticVersion, AbslStringify) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(absl::StrCat(version), version.ToString());
}
TEST(SemanticVersion, OStream) {
SemanticVersion version{1, 2, 3};
std::ostringstream os;
os << version;
EXPECT_EQ(os.str(), version.ToString());
}
TEST(SemanticVersion, Equality) {
SemanticVersion version{1, 2, 3};
SemanticVersion other{1, 2, 4};
EXPECT_EQ(version, version);
EXPECT_FALSE(version != version);
EXPECT_NE(version, other);
EXPECT_FALSE(version == other);
}
TEST(SemanticVersion, Ordering) {
std::array<SemanticVersion, 5> versions = {
SemanticVersion{3, 3, 3}, SemanticVersion{0, 0, 0},
SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4},
SemanticVersion{1, 3, 0}};
std::sort(versions.begin(), versions.end());
EXPECT_THAT(versions, testing::ElementsAre(
SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3},
SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0},
SemanticVersion{3, 3, 3}));
}
TEST(SemanticVersion, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
SemanticVersion{0, 0, 0},
SemanticVersion{1, 2, 3},
SemanticVersion{1, 2, 4},
SemanticVersion{1, 3, 0},
SemanticVersion{3, 3, 3},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8deb721c-2e7b-42dd-ac52-c0f6e048c817 | cpp | tensorflow/tensorflow | executor_cache | third_party/xla/xla/stream_executor/executor_cache.cc | third_party/xla/xla/stream_executor/executor_cache_test.cc | #include "xla/stream_executor/executor_cache.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
ExecutorCache::ExecutorCache() = default;
ExecutorCache::~ExecutorCache() = default;
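// Returns the executor cached under `ordinal`, constructing it with `factory`
// on a miss. The fast path is a read-locked lookup; on a miss the executor is
// built outside the lock and then inserted under the exclusive lock.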
absl::StatusOr<StreamExecutor*> ExecutorCache::GetOrCreate(
int ordinal, const ExecutorFactory& factory) {
if (auto fast_result = Get(ordinal); fast_result.ok()) {
return fast_result;
}
VLOG(2) << "building executor";
TF_ASSIGN_OR_RETURN(std::unique_ptr<StreamExecutor> result, factory());
auto returned_executor = result.get();
absl::MutexLock lock(&mutex_);
cache_.emplace(ordinal, std::move(result));
return returned_executor;
}
absl::StatusOr<StreamExecutor*> ExecutorCache::Get(int ordinal) {
absl::ReaderMutexLock lock{&mutex_};
if (auto it = cache_.find(ordinal); it != cache_.end()) {
return it->second.get();
}
return absl::NotFoundError(
absl::StrFormat("No executors registered for ordinal %d", ordinal));
}
} | #include "xla/stream_executor/executor_cache.h"
#include <memory>
#include "absl/log/log.h"
#include "xla/stream_executor/mock_stream_executor.h"
#include "xla/stream_executor/stream.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(ExecutorCacheTest, GetOnEmptyCacheFails) {
ExecutorCache cache;
EXPECT_FALSE(cache.Get(0).ok());
}
TEST(ExecutorCacheTest, GetReturnsExpectedExecutor) {
ExecutorCache cache;
StreamExecutor *executor0 = nullptr;
StreamExecutor *executor1 = nullptr;
auto factory = [&executor0, &executor1]() {
auto executor = std::make_unique<MockStreamExecutor>();
if (executor0 == nullptr) {
executor0 = executor.get();
} else if (executor1 == nullptr) {
executor1 = executor.get();
} else {
LOG(FATAL) << "Bad call to factory.";
}
return executor;
};
TF_ASSERT_OK_AND_ASSIGN(auto found, cache.GetOrCreate(0, factory));
EXPECT_EQ(found, executor0);
TF_ASSERT_OK_AND_ASSIGN(found, cache.GetOrCreate(1, factory));
EXPECT_EQ(found, executor1);
TF_ASSERT_OK_AND_ASSIGN(found, cache.GetOrCreate(0, factory));
EXPECT_EQ(found, executor0);
TF_ASSERT_OK_AND_ASSIGN(found, cache.GetOrCreate(1, factory));
EXPECT_EQ(found, executor1);
TF_ASSERT_OK_AND_ASSIGN(found, cache.Get(0));
EXPECT_EQ(found, executor0);
TF_ASSERT_OK_AND_ASSIGN(found, cache.Get(1));
EXPECT_EQ(found, executor1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/executor_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/executor_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
baa74cd6-fc3c-4f7b-8b1c-93eae8b57fad | cpp | tensorflow/tensorflow | stream_finder | third_party/xla/xla/stream_executor/stream_finder.cc | third_party/xla/xla/stream_executor/stream_finder_test.cc | #include "xla/stream_executor/stream_finder.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
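// Walks every visible device on `platform`, and for each already-created
// StreamExecutor asks whether it owns a Stream wrapping `gpu_stream`. Returns
// the first match, or NotFound if no existing executor has such a stream.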
absl::StatusOr<Stream*> FindStream(Platform* platform, void* gpu_stream) {
int number_devices = platform->VisibleDeviceCount();
for (int i = 0; i < number_devices; ++i) {
auto stream_executor = platform->FindExisting(i);
if (!stream_executor.ok()) {
continue;
}
Stream* found_stream = nullptr;
if ((found_stream = (*stream_executor)->FindAllocatedStream(gpu_stream)) !=
nullptr) {
return found_stream;
}
}
return absl::NotFoundError("Stream not found");
}
} | #include "xla/stream_executor/stream_finder.h"
#include "absl/status/status.h"
#include "xla/stream_executor/mock_platform.h"
#include "xla/stream_executor/mock_stream.h"
#include "xla/stream_executor/mock_stream_executor.h"
#include "xla/test.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
using testing::Return;
namespace stream_executor {
namespace {
TEST(StreamFinderTest, FindStreamFailsWithNoExecutors) {
MockStreamExecutor stream_executor;
MockPlatform platform;
EXPECT_CALL(platform, VisibleDeviceCount()).WillOnce(Return(0));
EXPECT_FALSE(FindStream(&platform, nullptr).ok());
}
TEST(StreamFinderTest, FindStreamFailsWithNoMatchingStream) {
MockStreamExecutor stream_executor;
MockPlatform platform;
EXPECT_CALL(platform, VisibleDeviceCount()).WillOnce(Return(1));
EXPECT_CALL(platform, FindExisting(0)).WillOnce(Return(&stream_executor));
void *gpu_stream = reinterpret_cast<void *>(0x1234);
EXPECT_CALL(stream_executor, FindAllocatedStream(gpu_stream))
.WillOnce(Return(nullptr));
EXPECT_FALSE(FindStream(&platform, gpu_stream).ok());
}
TEST(StreamFinderTest, FindStreamSucceeds) {
MockStreamExecutor stream_executor0;
MockStreamExecutor stream_executor1;
MockPlatform platform;
EXPECT_CALL(platform, VisibleDeviceCount()).WillOnce(Return(2));
EXPECT_CALL(platform, FindExisting(0)).WillOnce(Return(&stream_executor0));
EXPECT_CALL(platform, FindExisting(1)).WillOnce(Return(&stream_executor1));
void *gpu_stream = reinterpret_cast<void *>(0x1234);
MockStream stream;
EXPECT_CALL(stream_executor0, FindAllocatedStream(gpu_stream))
.WillOnce(Return(nullptr));
EXPECT_CALL(stream_executor1, FindAllocatedStream(gpu_stream))
.WillOnce(Return(&stream));
TF_ASSERT_OK_AND_ASSIGN(auto found_stream, FindStream(&platform, gpu_stream));
EXPECT_EQ(found_stream, &stream);
}
TEST(StreamFinderTest, OnlyExecutor1Exists) {
MockStreamExecutor stream_executor1;
MockPlatform platform;
EXPECT_CALL(platform, VisibleDeviceCount()).WillOnce(Return(2));
EXPECT_CALL(platform, FindExisting(0))
.WillRepeatedly(Return(absl::NotFoundError("Nope")));
EXPECT_CALL(platform, FindExisting(1)).WillOnce(Return(&stream_executor1));
void *gpu_stream = reinterpret_cast<void *>(0x1234);
MockStream stream;
EXPECT_CALL(stream_executor1, FindAllocatedStream(gpu_stream))
.WillOnce(Return(&stream));
TF_ASSERT_OK_AND_ASSIGN(auto found_stream, FindStream(&platform, gpu_stream));
EXPECT_EQ(found_stream, &stream);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/stream_finder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/stream_finder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5680cdf5-915e-4052-877f-c155631c5ca6 | cpp | tensorflow/tensorflow | device_description | third_party/xla/xla/stream_executor/device_description.cc | third_party/xla/xla/stream_executor/device_description_test.cc | #include "xla/stream_executor/device_description.h"
#include <cstdint>
#include <string>
#include <variant>
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/lib/math/math_util.h"
#include "tsl/platform/logging.h"
namespace stream_executor {
DeviceDescription::DeviceDescription(const GpuDeviceInfoProto &proto)
: block_dim_limit_(BlockDim(proto.block_dim_limit_x(),
proto.block_dim_limit_y(),
proto.block_dim_limit_z())),
threads_per_core_limit_(proto.threads_per_core_limit()),
threads_per_block_limit_(proto.threads_per_block_limit()),
threads_per_warp_(proto.threads_per_warp()),
registers_per_core_limit_(proto.registers_per_core_limit()),
registers_per_block_limit_(proto.registers_per_block_limit()),
device_memory_size_(proto.device_memory_size()),
l2_cache_size_(proto.l2_cache_size()),
memory_bandwidth_(proto.memory_bandwidth()),
shared_memory_per_core_(proto.shared_memory_per_core()),
shared_memory_per_block_(proto.shared_memory_per_block()),
shared_memory_per_block_optin_(proto.shared_memory_per_block_optin()),
clock_rate_ghz_(proto.clock_rate_ghz()),
gpu_compute_capability_(
proto.has_cuda_compute_capability()
? GpuComputeCapability(stream_executor::CudaComputeCapability(
proto.cuda_compute_capability()))
: GpuComputeCapability(stream_executor::RocmComputeCapability(
proto.rocm_compute_capability()))),
core_count_(proto.core_count()),
fpus_per_core_(proto.fpus_per_core()) {}
GpuDeviceInfoProto DeviceDescription::ToGpuProto() const {
stream_executor::GpuDeviceInfoProto proto;
if (auto *ptr = std::get_if<stream_executor::CudaComputeCapability>(
&gpu_compute_capability_))
*proto.mutable_cuda_compute_capability() = ptr->ToProto();
if (auto *ptr = std::get_if<stream_executor::RocmComputeCapability>(
&gpu_compute_capability_))
*proto.mutable_rocm_compute_capability() = ptr->ToProto();
proto.set_threads_per_block_limit(threads_per_block_limit_);
proto.set_threads_per_warp(threads_per_warp_);
proto.set_shared_memory_per_block(shared_memory_per_block_);
proto.set_shared_memory_per_block_optin(shared_memory_per_block_optin_);
proto.set_shared_memory_per_core(shared_memory_per_core_);
proto.set_threads_per_core_limit(threads_per_core_limit_);
proto.set_core_count(core_count_);
proto.set_fpus_per_core(fpus_per_core_);
proto.set_block_dim_limit_x(block_dim_limit().x);
proto.set_block_dim_limit_y(block_dim_limit().y);
proto.set_block_dim_limit_z(block_dim_limit().z);
proto.set_memory_bandwidth(memory_bandwidth_);
proto.set_l2_cache_size(l2_cache_size_);
proto.set_clock_rate_ghz(clock_rate_ghz_);
proto.set_device_memory_size(device_memory_size_);
proto.set_registers_per_core_limit(registers_per_core_limit_);
proto.set_registers_per_block_limit(registers_per_block_limit_);
return proto;
}
std::string DeviceDescription::ToString() const {
return ToGpuProto().DebugString();
}
const GpuComputeCapability &DeviceDescription::gpu_compute_capability() const {
return gpu_compute_capability_;
}
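// Returns the CUDA compute capability, or the sentinel {-1, -1} when the
// device is not a CUDA GPU.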
CudaComputeCapability DeviceDescription::cuda_compute_capability() const {
if (auto *ptr =
std::get_if<CudaComputeCapability>(&gpu_compute_capability_)) {
return *ptr;
}
return CudaComputeCapability{-1, -1};
}
RocmComputeCapability DeviceDescription::rocm_compute_capability() const {
if (auto *ptr =
std::get_if<RocmComputeCapability>(&gpu_compute_capability_)) {
return *ptr;
}
return RocmComputeCapability{};
}
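// Checks that `thread_dim` respects both the total threads-per-block limit
// and the per-dimension limits of the device.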
bool ThreadDimOk(const DeviceDescription &device_description,
const ThreadDim &thread_dim) {
const int64_t total_threads = thread_dim.x * thread_dim.y * thread_dim.z;
const int64_t threads_per_block_limit =
device_description.threads_per_block_limit();
if (total_threads > threads_per_block_limit) {
VLOG(2) << "exceeded total-thread-per-block limit: " << total_threads
<< " vs limit " << threads_per_block_limit;
return false;
}
const auto &limit = device_description.thread_dim_limit();
bool ok = thread_dim.x <= limit.x && thread_dim.y <= limit.y &&
thread_dim.z <= limit.z;
if (!ok) {
VLOG(2) << "thread dim " << thread_dim.ToString()
<< " exceeds limit constraints of " << limit.ToString();
}
return ok;
}
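// Derives a 1-D launch configuration for `element_count` elements:
// threads_per_block defaults to the device limit (clamped to element_count
// when a single block suffices) and block_count covers the remainder.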
void CalculateDimensionality(const DeviceDescription &device_description,
int64_t element_count, int64_t *threads_per_block,
int64_t *block_count) {
*threads_per_block = device_description.threads_per_block_limit();
*block_count = tsl::MathUtil::CeilOfRatio(element_count, *threads_per_block);
if (*block_count == 1) {
CHECK_LE(element_count, *threads_per_block);
*threads_per_block = element_count;
}
}
} | #include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DeviceDescription, DefaultConstruction) {
DeviceDescription desc;
EXPECT_EQ(desc.device_address_bits(), -1);
EXPECT_EQ(desc.device_memory_size(), -1);
EXPECT_EQ(desc.clock_rate_ghz(), -1);
EXPECT_EQ(desc.name(), "<undefined>");
EXPECT_EQ(desc.platform_version(), "<undefined>");
constexpr SemanticVersion kZeroVersion = {0, 0, 0};
EXPECT_EQ(desc.driver_version(), kZeroVersion);
EXPECT_EQ(desc.runtime_version(), kZeroVersion);
EXPECT_EQ(desc.pci_bus_id(), "<undefined>");
}
TEST(CudaComputeCapability, GenerationNumericTest) {
EXPECT_TRUE(CudaComputeCapability(7, 5).IsAtLeastVolta());
EXPECT_TRUE(CudaComputeCapability(8, 0).IsAtLeastAmpere());
EXPECT_TRUE(CudaComputeCapability(9, 0).IsAtLeastHopper());
EXPECT_TRUE(CudaComputeCapability(10, 0).IsAtLeastBlackwell());
}
TEST(CudaComputeCapability, GenerationLiteralTest) {
EXPECT_TRUE(CudaComputeCapability::Volta().IsAtLeast(7));
EXPECT_TRUE(CudaComputeCapability::Ampere().IsAtLeast(8));
EXPECT_TRUE(CudaComputeCapability::Hopper().IsAtLeast(9));
EXPECT_TRUE(CudaComputeCapability::Blackwell().IsAtLeast(10));
}
TEST(CudaComputeCapability, ComparisonTest) {
CudaComputeCapability lower{1, 0};
CudaComputeCapability slightly_higher{1, 1};
CudaComputeCapability higher{2, 0};
EXPECT_TRUE(lower == lower);
EXPECT_FALSE(lower == slightly_higher);
EXPECT_FALSE(lower == higher);
EXPECT_TRUE(lower <= lower);
EXPECT_TRUE(lower < slightly_higher);
EXPECT_TRUE(lower <= slightly_higher);
EXPECT_FALSE(lower < lower);
EXPECT_FALSE(slightly_higher <= lower);
EXPECT_FALSE(slightly_higher < lower);
EXPECT_TRUE(slightly_higher >= slightly_higher);
EXPECT_TRUE(slightly_higher > lower);
EXPECT_TRUE(slightly_higher >= lower);
EXPECT_FALSE(slightly_higher > slightly_higher);
EXPECT_FALSE(lower > slightly_higher);
EXPECT_FALSE(lower >= slightly_higher);
EXPECT_TRUE(higher > slightly_higher);
EXPECT_TRUE(higher >= slightly_higher);
EXPECT_TRUE(higher >= higher);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_description.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_description_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e320629-eb4a-45d7-99a1-6485f30cd69f | cpp | tensorflow/tensorflow | dnn | third_party/xla/xla/stream_executor/dnn.cc | third_party/xla/xla/stream_executor/dnn_test.cc | #include "xla/stream_executor/dnn.h"
#include <Eigen/Core>
#include <algorithm>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace stream_executor {
namespace dnn {
namespace {
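// Returns true if every (key, value) entry of `y` is also present in `x`.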
bool ProtoMapIsSubset(const google::protobuf::Map<int64_t, int64_t>& x,
const google::protobuf::Map<int64_t, int64_t>& y) {
for (const auto& ypair : y) {
const auto it = x.find(ypair.first);
if (it == x.end() || it->second != ypair.second) return false;
}
return true;
}
bool ProtoMapsEqual(const google::protobuf::Map<int64_t, int64_t>& x,
const google::protobuf::Map<int64_t, int64_t>& y) {
return ProtoMapIsSubset(x, y) && ProtoMapIsSubset(y, x);
}
}
constexpr DataType ToDataType<tsl::float8_e3m4>::value;
constexpr DataType ToDataType<tsl::float8_e4m3>::value;
constexpr DataType ToDataType<tsl::float8_e4m3fn>::value;
constexpr DataType ToDataType<tsl::float8_e4m3fnuz>::value;
constexpr DataType ToDataType<tsl::float8_e5m2>::value;
constexpr DataType ToDataType<tsl::float8_e5m2fnuz>::value;
constexpr DataType ToDataType<float>::value;
constexpr DataType ToDataType<double>::value;
constexpr DataType ToDataType<Eigen::half>::value;
constexpr DataType ToDataType<Eigen::bfloat16>::value;
constexpr DataType ToDataType<int8_t>::value;
constexpr DataType ToDataType<int32_t>::value;
constexpr DataType ToDataType<int64_t>::value;
constexpr DataType ToDataType<std::complex<float>>::value;
constexpr DataType ToDataType<std::complex<double>>::value;
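// Builds a cuDNN-frontend algorithm descriptor from an engine id, its tuning
// knobs, and an optional workspace size.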
AlgorithmDesc::AlgorithmDesc(
int64_t engine_id,
const std::vector<std::pair<int64_t, int64_t>>& tuning_knobs,
std::optional<uint64_t> workspace_size) {
proto_.set_is_cudnn_frontend(true);
proto_.set_algo_id(engine_id);
if (workspace_size) {
proto_.mutable_workspace_size()->set_value(*workspace_size);
}
for (const auto& pair : tuning_knobs) {
(*proto_.mutable_tuning_knobs())[pair.first] = pair.second;
}
}
uint64_t AlgorithmDesc::hash() const {
return tsl::DeterministicProtoHash64(proto_);
}
bool AlgorithmDesc::operator==(const AlgorithmDesc& other) const {
if (is_cudnn_frontend()) {
return other.is_cudnn_frontend() && algo_id() == other.algo_id() &&
ProtoMapsEqual(proto_.tuning_knobs(), other.proto_.tuning_knobs());
}
return !other.is_cudnn_frontend() && algo_id() == other.algo_id() &&
tensor_ops_enabled() == other.tensor_ops_enabled();
}
std::string AlgorithmDesc::ToString() const {
if (is_cudnn_frontend()) {
absl::btree_map<int64_t, int64_t> tuning_knobs_sorted;
absl::c_copy(proto_.tuning_knobs(),
std::inserter(tuning_knobs_sorted, tuning_knobs_sorted.end()));
return absl::StrFormat(
"eng%d{%s}", proto_.algo_id(),
absl::StrJoin(
tuning_knobs_sorted, ",",
[](std::string* out, const std::pair<int64_t, int64_t>& pair) {
absl::StrAppendFormat(out, "k%d=%d", pair.first, pair.second);
}));
}
if (tensor_ops_enabled()) {
return absl::StrCat(algo_id(), "#TC");
} else {
return absl::StrCat(algo_id());
}
}
std::vector<std::pair<int64_t, int64_t>> AlgorithmDesc::TuningKnobs() const {
std::vector<std::pair<int64_t, int64_t>> result;
result.reserve(proto_.tuning_knobs().size());
for (const auto& pair : proto_.tuning_knobs()) {
result.emplace_back(pair.first, pair.second);
}
return result;
}
absl::Status DnnSupport::GetConvolveRunners(
    bool, dnn::ConvolutionKind, dnn::DataType, dnn::DataType, Stream*,
    const dnn::BatchDescriptor&, DeviceMemoryBase,
    const dnn::FilterDescriptor&, DeviceMemoryBase,
    const dnn::BatchDescriptor&, DeviceMemoryBase,
    const dnn::ConvolutionDescriptor&, bool, ScratchAllocator*,
    const NumericOptions&,
    std::vector<std::unique_ptr<const dnn::ConvRunner>>*) {
return absl::UnimplementedError("GetConvolveRunners not implemented.");
}
absl::StatusOr<std::unique_ptr<const dnn::ConvRunner>>
DnnSupport::ConvolveRunnerFromDesc(
Stream* stream, const dnn::AlgorithmDesc& algorithm_desc,
dnn::ConvolutionKind kind, dnn::DataType element_type,
dnn::DataType output_type, const dnn::BatchDescriptor& input_descriptor,
const dnn::FilterDescriptor& filter_descriptor,
const dnn::BatchDescriptor& output_descriptor,
const dnn::ConvolutionDescriptor& convolution_descriptor) {
return absl::UnimplementedError("ConvolveRunnerFromDesc not implemented.");
}
absl::Status DnnSupport::GetGraphConvolveRunners(
    dnn::ConvolutionKind, dnn::DataType, dnn::DataType, Stream*,
    const dnn::BatchDescriptor&, const dnn::FilterDescriptor&,
    const dnn::BatchDescriptor&, const dnn::ConvolutionDescriptor&, bool,
    const NumericOptions&,
    std::vector<std::unique_ptr<const dnn::GraphConvRunner>>*,
    std::string) {
return absl::UnimplementedError("GetGraphConvolveRunners not implemented.");
}
absl::StatusOr<std::unique_ptr<const dnn::GraphConvRunner>>
DnnSupport::GraphConvolveRunnerFromDesc(
Stream* stream, const dnn::AlgorithmDesc& algorithm_desc,
dnn::ConvolutionKind kind, dnn::DataType element_type,
dnn::DataType output_type, const dnn::BatchDescriptor& input_descriptor,
const dnn::FilterDescriptor& filter_descriptor,
const dnn::BatchDescriptor& output_descriptor,
const dnn::ConvolutionDescriptor& convolution_descriptor,
std::string serialized_graph) {
return absl::UnimplementedError(
"GraphConvolveRunnerFromDesc not implemented.");
}
absl::Status DnnSupport::GetFusedConvolveRunners(
bool use_cudnn_frontend, dnn::ConvolutionKind kind,
dnn::DataType element_type, dnn::DataType bias_type,
dnn::DataType output_type, double conv_input_scale, double side_input_scale,
double leakyrelu_alpha, Stream* stream,
const dnn::BatchDescriptor& input_descriptor,
const dnn::FilterDescriptor& filter_descriptor,
const dnn::BatchDescriptor& bias_descriptor,
const dnn::BatchDescriptor& output_descriptor,
const dnn::ConvolutionDescriptor& convolution_descriptor, bool use_fallback,
dnn::ActivationMode activation_mode, const NumericOptions& numeric_options,
std::vector<std::unique_ptr<const dnn::FusedConvRunner>>* out_exec_plans) {
return absl::UnimplementedError("GetFusedConvolveRunners not implemented.");
}
absl::Status DnnSupport::GetFusedMatmulRunners(
bool use_cudnn_frontend, dnn::DataType element_type,
dnn::DataType bias_type, dnn::DataType output_type, Stream* stream,
bool trans_a, bool trans_b, uint64_t m, uint64_t n, uint64_t k, int64_t lda,
int64_t ldb, int64_t ldc, dnn::ActivationMode activation_mode,
bool use_fallback, const NumericOptions& numeric_options,
std::vector<std::unique_ptr<const dnn::FusedMatmulRunner>>*
out_exec_plans) {
return absl::UnimplementedError("GetFusedMatmulRunners not implemented.");
}
absl::StatusOr<std::unique_ptr<const dnn::FusedConvRunner>>
DnnSupport::FusedConvolveRunnerFromDesc(
Stream* stream, const dnn::AlgorithmDesc& algorithm_desc,
dnn::ConvolutionKind kind, dnn::DataType element_type,
dnn::DataType bias_type, dnn::DataType output_type, double conv_scale,
double side_input_scale, double leakyrelu_alpha,
const dnn::BatchDescriptor& input_descriptor,
const dnn::FilterDescriptor& filter_descriptor,
const dnn::BatchDescriptor& bias_descriptor,
const dnn::BatchDescriptor& output_descriptor,
const dnn::ConvolutionDescriptor& convolution_descriptor,
dnn::ActivationMode activation_mode) {
return absl::UnimplementedError(
"FusedConvolveRunnerFromDesc not implemented.");
}
absl::StatusOr<std::unique_ptr<const dnn::NormRunner>>
DnnSupport::NormRunnerFromDesc(
Stream* stream, const dnn::AlgorithmDesc& algorithm_desc,
dnn::NormKind kind, double epsilon,
const dnn::TensorDescriptor& x_descriptor,
const dnn::TensorDescriptor& scale_descriptor,
const dnn::TensorDescriptor& y_or_dx_descriptor,
std::optional<dnn::TensorDescriptor> bias_descriptor,
std::optional<dnn::TensorDescriptor> dy_descriptor,
std::optional<dnn::TensorDescriptor> expectation_descriptor,
std::optional<dnn::TensorDescriptor> norm_factor_descriptor,
std::optional<dnn::TensorDescriptor> dscale_descriptor,
std::optional<dnn::TensorDescriptor> dbias_descriptor) {
return absl::UnimplementedError("NormRunnerFromDesc not implemented.");
}
bool DnnSupport::GetMIOpenConvolveAlgorithms(
    dnn::ConvolutionKind, dnn::DataType, dnn::DataType, Stream*,
    const dnn::BatchDescriptor&, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor&, DeviceMemoryBase filter_data,
    const dnn::BatchDescriptor&, DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor&, ScratchAllocator* scratch_allocator,
    std::vector<ProfileResult>*) {
return false;
}
bool DnnSupport::GetRnnAlgorithms(std::vector<AlgorithmDesc>* out_algorithms) {
return false;
}
absl::Status DnnSupport::DoPoolForward(
DataType element_type, Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const NumericOptions& numeric_options,
const dnn::BatchDescriptor& input_dimensions, DeviceMemoryBase input_data,
const dnn::BatchDescriptor& output_dimensions, DeviceMemoryBase output_data,
ScratchAllocator* workspace_allocator) {
return DoPoolForward(element_type, stream, pooling_dimensions,
input_dimensions, input_data, output_dimensions,
output_data, workspace_allocator);
}
absl::Status DnnSupport::DoPoolBackward(
DataType element_type, Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const NumericOptions& numeric_options,
const dnn::BatchDescriptor& input_dimensions, DeviceMemoryBase input_data,
const dnn::BatchDescriptor& output_dimensions, DeviceMemoryBase output_data,
DeviceMemoryBase input_diff_data, DeviceMemoryBase output_diff_data,
ScratchAllocator* workspace_allocator) {
return DoPoolBackward(element_type, stream, pooling_dimensions,
input_dimensions, input_data, output_dimensions,
output_data, input_diff_data, output_diff_data,
workspace_allocator);
}
std::string QuantizedActivationModeString(QuantizedActivationMode mode) {
switch (mode) {
case dnn::QuantizedActivationMode::k8Bit:
return "uint8";
case dnn::QuantizedActivationMode::k16Bit:
return "uint16";
case dnn::QuantizedActivationMode::k32Bit:
return "int32";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(mode));
}
}
std::string ActivationModeString(ActivationMode mode) {
switch (mode) {
case ActivationMode::kNone:
return "none";
case ActivationMode::kSigmoid:
return "sigmoid";
case ActivationMode::kRelu:
return "relu";
case ActivationMode::kRelu6:
return "relu6";
case ActivationMode::kReluX:
return "reluX";
case ActivationMode::kTanh:
return "tanh";
case ActivationMode::kBandPass:
return "bandpass";
case ActivationMode::kElu:
return "elu";
case ActivationMode::kLeakyRelu:
return "leakyrelu";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(mode));
}
}
std::string ElementwiseOperationString(ElementwiseOperation op) {
switch (op) {
case ElementwiseOperation::kAdd:
return "add";
case ElementwiseOperation::kMultiply:
return "multiply";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(op));
}
}
std::string DataLayoutString(DataLayout layout) {
switch (layout) {
case DataLayout::kYXDepthBatch:
return "YXDepthBatch";
case DataLayout::kYXBatchDepth:
return "YXBatchDepth";
case DataLayout::kBatchYXDepth:
return "BatchYXDepth";
case DataLayout::kBatchDepthYX:
return "BatchDepthYX";
case DataLayout::kBatchDepthYX4:
return "BatchDepthYX4";
case DataLayout::kBatchDepthYX32:
return "BatchDepthYX32";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(layout));
}
}
std::string FilterLayoutString(FilterLayout layout) {
switch (layout) {
case FilterLayout::kOutputInputYX:
return "OutputInputYX";
case FilterLayout::kOutputYXInput:
return "OutputYXInput";
case FilterLayout::kOutputInputYX4:
return "OutputInputYX4";
case FilterLayout::kOutputInputYX32:
return "OutputInputYX32";
case FilterLayout::kOutputInputYX32_CudnnReordered:
return "OutputInputYX32_CudnnReordered";
case FilterLayout::kInputYXOutput:
return "InputYXOutput";
case FilterLayout::kYXInputOutput:
return "YXInputOutput";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(layout));
}
}
std::string PadAlignmentString(PadAlignment alignment) {
switch (alignment) {
case PadAlignment::kDefault:
return "default";
case PadAlignment::kCudnnPadding:
return "cuDNN padding";
case PadAlignment::kTensorFlowPadding:
return "TensorFlow padding";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(alignment));
}
}
std::ostream& operator<<(std::ostream& str, dnn::PadAlignment alignment) {
return str << PadAlignmentString(alignment);
}
std::string ShortPoolingModeString(PoolingMode mode) {
switch (mode) {
case PoolingMode::kMaximum:
return "Max";
case PoolingMode::kAverage:
return "Avg";
default:
return absl::StrCat("unknown: ", static_cast<int32_t>(mode));
}
}
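// Logical positions of the batch/depth (or output/input, for filters)
// dimensions and of the first spatial dimension for a given layout.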
struct ConvDimIndices {
union {
struct {
int depth_idx;
int batch_idx;
int spatial_idx;
} data;
struct {
int output_idx;
int input_idx;
int spatial_idx;
} filter;
};
};
ConvDimIndices GetDimIndices(const DataLayout& layout, const int data_dims) {
ConvDimIndices dim_indices;
switch (layout) {
case DataLayout::kYXBatchDepth:
dim_indices.data.depth_idx = data_dims - 1;
dim_indices.data.batch_idx = data_dims - 2;
dim_indices.data.spatial_idx = 0;
break;
case DataLayout::kYXDepthBatch:
dim_indices.data.depth_idx = data_dims - 2;
dim_indices.data.batch_idx = data_dims - 1;
dim_indices.data.spatial_idx = 0;
break;
case DataLayout::kBatchYXDepth:
dim_indices.data.depth_idx = data_dims - 1;
dim_indices.data.batch_idx = 0;
dim_indices.data.spatial_idx = 1;
break;
case DataLayout::kBatchDepthYX:
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
dim_indices.data.depth_idx = 1;
dim_indices.data.batch_idx = 0;
dim_indices.data.spatial_idx = 2;
break;
default:
LOG(FATAL) << "Unknown layout " << layout;
}
return dim_indices;
}
ConvDimIndices GetDimIndices(const FilterLayout& layout, const int data_dims) {
ConvDimIndices dim_indices;
switch (layout) {
case FilterLayout::kOutputInputYX:
case FilterLayout::kOutputInputYX4:
case FilterLayout::kOutputInputYX32:
case FilterLayout::kOutputInputYX32_CudnnReordered:
dim_indices.filter.input_idx = 1;
dim_indices.filter.output_idx = 0;
dim_indices.filter.spatial_idx = 2;
break;
case FilterLayout::kOutputYXInput:
dim_indices.filter.input_idx = data_dims - 1;
dim_indices.filter.output_idx = 0;
dim_indices.filter.spatial_idx = 1;
break;
case FilterLayout::kInputYXOutput:
dim_indices.filter.input_idx = 0;
dim_indices.filter.output_idx = data_dims - 1;
dim_indices.filter.spatial_idx = 1;
break;
case FilterLayout::kYXInputOutput:
dim_indices.filter.input_idx = data_dims - 2;
dim_indices.filter.output_idx = data_dims - 1;
dim_indices.filter.spatial_idx = 0;
break;
default:
LOG(FATAL) << "Unknown layout " << layout;
}
return dim_indices;
}
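// Permutes `input` from layout `from` to layout `to`: batch and depth entries
// are moved to their new positions while spatial entries keep their relative
// order.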
std::vector<int64_t> ReorderDims(const std::vector<int64_t>& input,
const DataLayout& from, const DataLayout& to) {
if (from == to) return input;
ConvDimIndices from_indices = GetDimIndices(from, input.size());
ConvDimIndices to_indices = GetDimIndices(to, input.size());
std::vector<int64_t> reordered(input.size());
reordered[to_indices.data.batch_idx] = input[from_indices.data.batch_idx];
reordered[to_indices.data.depth_idx] = input[from_indices.data.depth_idx];
int spatial_idx_from = from_indices.data.spatial_idx;
int spatial_idx_to = to_indices.data.spatial_idx;
for (size_t i = 0; i < input.size() - 2;
i++, spatial_idx_from++, spatial_idx_to++) {
reordered[spatial_idx_to] = input[spatial_idx_from];
}
return reordered;
}
std::vector<int64_t> ReorderDims(const std::vector<int64_t>& input,
const FilterLayout& from,
const FilterLayout& to) {
if (from == to) return input;
ConvDimIndices from_indices = GetDimIndices(from, input.size());
ConvDimIndices to_indices = GetDimIndices(to, input.size());
std::vector<int64_t> reordered(input.size());
reordered[to_indices.filter.output_idx] =
input[from_indices.filter.output_idx];
reordered[to_indices.filter.input_idx] = input[from_indices.filter.input_idx];
int spatial_idx_from = from_indices.filter.spatial_idx;
int spatial_idx_to = to_indices.filter.spatial_idx;
for (size_t i = 0; i < input.size() - 2;
i++, spatial_idx_from++, spatial_idx_to++) {
reordered[spatial_idx_to] = input[spatial_idx_from];
}
return reordered;
}
std::string AlgorithmConfig::ToString() const {
std::string algo = "none";
if (algorithm().has_value()) {
algo = algorithm()->ToString();
}
std::string algo_no_scratch = "none";
if (algorithm_no_scratch().has_value()) {
algo_no_scratch = algorithm_no_scratch()->ToString();
}
return absl::StrCat(algo, ", ", algo_no_scratch);
}
int TensorDescriptor::ndims() const {
CHECK_EQ(dimensions_.size(), minor_to_major_.size());
return dimensions_.size();
}
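// Returns the dimensions in physical (major-to-minor) order, as implied by
// the stored minor_to_major layout.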
absl::StatusOr<std::vector<int64_t>>
TensorDescriptor::GetPhysicalDimensionsMajorToMinor() const {
std::vector<int64_t> logical_to_physical(minor_to_major_.size());
for (int64_t physical = 0; physical < logical_to_physical.size();
++physical) {
int64_t logical = minor_to_major_.at(minor_to_major_.size() - 1 - physical);
logical_to_physical[logical] = physical;
}
if (dimensions_.size() != minor_to_major_.size())
return absl::InternalError("Dimensions size should match the layout size.");
std::vector<int64_t> physical_dims(dimensions_.size());
for (int64_t i = 0; i < physical_dims.size(); ++i) {
physical_dims[logical_to_physical[i]] = dimensions_[i];
}
return physical_dims;
}
std::vector<int64_t> TensorDescriptor::GetPhysicalStridesMajorToMinor() const {
std::vector<int64_t> phys_dims = GetPhysicalDimensionsMajorToMinor().value();
std::vector<int64_t> phys_strides(ndims());
phys_strides[ndims() - 1] = 1;
for (int i = ndims() - 2; i >= 0; i--) {
phys_strides[i] = phys_strides[i + 1] * phys_dims[i + 1];
}
return phys_strides;
}
std::vector<int64_t> TensorDescriptor::GetLogicalStrides() const {
std::vector<int64_t> physical_strides = GetPhysicalStridesMajorToMinor();
std::reverse(physical_strides.begin(), physical_strides.end());
std::vector<int64_t> logical_strides(physical_strides.size());
for (int i = 0; i < ndims(); i++) {
logical_strides[minor_to_major_[i]] = physical_strides[i];
}
return logical_strides;
}
TensorDescriptor TensorDescriptor::For(
DataType type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major) {
std::vector<int64_t> dims(dimensions.size());
std::vector<int64_t> minor_to_major_vec(minor_to_major.size());
CHECK_EQ(dimensions.size(), minor_to_major.size());
for (int i = 0; i < dimensions.size(); i++) {
dims[i] = dimensions[i];
minor_to_major_vec[i] = minor_to_major[i];
}
return TensorDescriptor(type, dims, minor_to_major_vec);
}
std::string TensorDescriptor::ToString() const {
return absl::StrFormat("{dimensions: %s minor_to_major: %s}",
absl::StrJoin(dimensions(), ","),
absl::StrJoin(minor_to_major(), ","));
}
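// Returns the dimensions that are neither batch nor contracting; a dimension
// marked as both is reported as an error.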
absl::StatusOr<std::vector<int64_t>>
MatmulTensorDescriptor::GetNonContractingDims() const {
std::vector<int64_t> non_contracting_dims;
for (int64_t dim = 0; dim < tensor_.dimensions().size(); ++dim) {
bool is_batch = absl::c_count(batch_dimension_numbers_, dim) != 0;
bool is_contracting = absl::c_count(contracting_dim_, dim) != 0;
if (is_batch && is_contracting)
return absl::InternalError(
"A dimension cannot be both a batch dimension and a contracting "
"dimension.");
if (!(is_batch || is_contracting)) non_contracting_dims.push_back(dim);
}
if (batch_dimension_numbers_.size() + contracting_dim_.size() +
non_contracting_dims.size() !=
tensor_.dimensions().size())
return absl::InternalError(
"Batch_dimension_numbers, contracting_dim and non_contracting_dims "
"should sum up to the total number of dimensions.");
return non_contracting_dims;
}
absl::StatusOr<std::vector<int64_t>>
MatmulTensorDescriptor::MakeCudnnCompatible(const std::vector<int64_t>& vec,
bool is_lhs) const {
std::vector<int64_t> cudnn_compatible(vec.size());
int batch_dim_size = batch_dimension_numbers_.size();
CHECK_LT(batch_dim_size, vec.size());
for (int i = 0; i < batch_dim_size; i++) {
cudnn_compatible[i] = vec.at(batch_dimension_numbers_.at(i));
}
std::vector<int64_t> non_contracting_dims = GetNonContractingDims().value();
if (batch_dimension_numbers_.size() + contracting_dim_.size() +
non_contracting_dims.size() !=
vec.size())
return absl::InternalError(
"Batch_dimension_numbers, contracting_dim and non_contracting_dims "
"should sum up to the total number of dimensions.");
if (is_lhs) {
for (int i = 0; i < non_contracting_dims.size(); i++) {
cudnn_compatible[batch_dim_size + i] = vec.at(non_contracting_dims.at(i));
}
for (int i = 0; i < contracting_dim_.size(); i++) {
cudnn_compatible[batch_dim_size + non_contracting_dims.size() + i] =
vec.at(contracting_dim_.at(i));
}
} else {
for (int i = 0; i < contracting_dim_.size(); i++) {
cudnn_compatible[batch_dim_size + i] = vec.at(contracting_dim_.at(i));
}
for (int i = 0; i < non_contracting_dims.size(); i++) {
cudnn_compatible[batch_dim_size + contracting_dim_.size() + i] =
vec.at(non_contracting_dims.at(i));
}
}
return cudnn_compatible;
}
std::vector<int64_t> MatmulTensorDescriptor::GetCudnnCompatibleDimensions(
bool is_lhs) const {
std::vector<int64_t> cudnn_compatible_dims =
MakeCudnnCompatible(tensor_.dimensions(), is_lhs).value();
return cudnn_compatible_dims;
}
std::vector<int64_t> MatmulTensorDescriptor::GetCudnnCompatibleStrides(
bool is_lhs) const {
std::vector<int64_t> cudnn_compatible_strides =
MakeCudnnCompatible(tensor_.GetLogicalStrides(), is_lhs).value();
return cudnn_compatible_strides;
}
MatmulTensorDescriptor MatmulTensorDescriptor::For(
DataType type, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> minor_to_major,
absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> contracting_dims) {
std::vector<int64_t> batch_dims_vec(batch_dims.size());
std::vector<int64_t> contracting_dims_vec(contracting_dims.size());
for (int i = 0; i < batch_dims.size(); i++) {
batch_dims_vec[i] = batch_dims[i];
}
for (int i = 0; i < contracting_dims.size(); i++) {
contracting_dims_vec[i] = contracting_dims[i];
}
return MatmulTensorDescriptor(
TensorDescriptor::For(type, dimensions, minor_to_major), batch_dims_vec,
contracting_dims_vec);
}
std::string MatmulTensorDescriptor::ToString() const {
return absl::StrFormat(
"{%s, batch_dimension_numbers: %s contracting_dim: %s}",
tensor_.ToString(), absl::StrJoin(batch_dimension_numbers_, ","),
absl::StrJoin(contracting_dim_, ","));
}
BatchDescriptor::BatchDescriptor(int ndims)
: value_max_(0.0),
value_min_(0.0),
quantized_activation_mode_(QuantizedActivationMode::k8Bit) {
tensor_.mutable_dimensions()->Resize(ndims + 2, 0);
set_layout(DataLayout::kYXDepthBatch);
}
BatchDescriptor::BatchDescriptor() : BatchDescriptor(2) {}
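// Returns count, feature_map_count and the spatial sizes reordered from the
// canonical BatchDepthYX order into `layout`.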
std::vector<int64_t> BatchDescriptor::full_dims(
const DataLayout& layout) const {
std::vector<int64_t> bdyx_dims(ndims() + 2);
bdyx_dims[0] = count();
bdyx_dims[1] = feature_map_count();
std::copy(spatial_size().begin(), spatial_size().end(),
bdyx_dims.begin() + 2);
return ReorderDims(bdyx_dims, DataLayout::kBatchDepthYX, layout);
}
std::vector<int64_t> BatchDescriptor::full_strides(
const DataLayout& layout) const {
std::vector<int64_t> phys_dims = full_dims(this->layout());
std::vector<int64_t> phys_strides(phys_dims.size());
phys_strides[ndims() + 1] = 1;
for (int i = ndims(); i >= 0; i--) {
phys_strides[i] = phys_strides[i + 1] * phys_dims[i + 1];
}
return ReorderDims(phys_strides, this->layout(), layout);
}
std::vector<int64_t> BatchDescriptor::vectorized_dims(const DataLayout& layout,
int vector_size,
int vector_dim) const {
std::vector<int64_t> bdyx_dims = full_dims(dnn::DataLayout::kBatchDepthYX);
if (vector_dim != -1) {
bdyx_dims[vector_dim] /= vector_size;
}
return dnn::ReorderDims(bdyx_dims, dnn::DataLayout::kBatchDepthYX, layout);
}
std::vector<int64_t> BatchDescriptor::vectorized_strides(
const DataLayout& layout, int vector_size, int vector_dim) const {
std::vector<int64_t> phys_dims =
vectorized_dims(this->layout(), vector_size, vector_dim);
std::vector<int64_t> phys_strides(phys_dims.size());
phys_strides[phys_dims.size() - 1] = 1;
for (int i = phys_dims.size() - 2; i >= 0; i--) {
phys_strides[i] = phys_strides[i + 1] * phys_dims[i + 1];
}
return ReorderDims(phys_strides, this->layout(), layout);
}
void BatchDescriptor::CloneFrom(const BatchDescriptor& other) {
tensor_ = other.tensor_;
value_max_ = other.value_max_;
value_min_ = other.value_min_;
quantized_activation_mode_ = other.quantized_activation_mode_;
}
std::string BatchDescriptor::ToString() const {
std::string spatial;
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&spatial, "%d ", spatial_size()[i]);
}
return absl::StrFormat(
"{count: %d feature_map_count: %d spatial: %s "
"value_min: %f value_max: %f layout: %s}",
count(), feature_map_count(), spatial, value_min_, value_max_,
DataLayoutString(layout()));
}
std::string BatchDescriptor::ToShortString() const {
std::string depth = absl::StrCat("d", feature_map_count());
std::string batch = absl::StrCat("b", count());
std::string spatial = "s";
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&spatial, "%d ", spatial_size()[i]);
}
std::string suffix;
if (value_min() != value_max()) {
absl::StrAppend(&suffix, "[", value_min(), ";", value_max(), "]");
}
if (quantized_activation_mode() == QuantizedActivationMode::k16Bit) {
suffix += "_16bit";
}
switch (layout()) {
case DataLayout::kYXDepthBatch:
return absl::StrCat(spatial, depth, batch, suffix);
case DataLayout::kYXBatchDepth:
return absl::StrCat(spatial, batch, depth, suffix);
case DataLayout::kBatchYXDepth:
return absl::StrCat(batch, spatial, depth, suffix);
case DataLayout::kBatchDepthYX:
return absl::StrCat(batch, depth, spatial, suffix);
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
return absl::StrCat(batch, depth, spatial, suffix, "(VECT_C)");
default:
LOG(FATAL) << "Unknown layout " << static_cast<int32_t>(layout());
return "";
}
}
int64_t BatchDescriptor::NodesPerFeatureMap() const {
int64_t ret = 1;
for (int i = 0; i < ndims(); i++) {
ret *= spatial_size()[i];
}
return ret;
}
int64_t BatchDescriptor::NodesAcrossFeatureMaps() const {
return NodesPerFeatureMap() * feature_map_count();
}
int64_t BatchDescriptor::ElementCount() const {
return count() * feature_map_count() * NodesPerFeatureMap();
}
int64_t BatchDescriptor::FullyConnectedWeightCount(
const BatchDescriptor& input, const BatchDescriptor& output) {
return input.NodesAcrossFeatureMaps() * output.NodesAcrossFeatureMaps();
}
int64_t BatchDescriptor::FullyConnectedBiasCount(
const BatchDescriptor& output) {
return output.NodesAcrossFeatureMaps();
}
BatchDescriptor BatchDescriptor::DepthConcatenateOutputDescriptor(
absl::Span<const dnn::BatchDescriptor> inputs) {
if (inputs.empty()) {
return BatchDescriptor();
}
int feature_map_count = 0;
for (const auto& dimensions : inputs) {
feature_map_count += dimensions.feature_map_count();
}
BatchDescriptor output = inputs[0];
output.set_feature_map_count(feature_map_count);
return output;
}
TensorDescriptorProto BatchDescriptor::ToProto(DataType data_type) const {
CHECK_EQ(0.0, value_max_);
CHECK_EQ(0.0, value_min_);
CHECK(quantized_activation_mode_ == QuantizedActivationMode::k8Bit);
TensorDescriptorProto ret = tensor_;
ret.set_data_type(data_type);
return ret;
}
FilterDescriptor::FilterDescriptor(int ndims) {
tensor_.mutable_dimensions()->Resize(ndims + 2, 0);
set_layout(FilterLayout::kOutputInputYX);
}
FilterDescriptor::FilterDescriptor() : FilterDescriptor(2) {}
FilterDescriptor::~FilterDescriptor() {}
void FilterDescriptor::CloneFrom(const FilterDescriptor& other) {
tensor_ = other.tensor_;
}
std::string FilterDescriptor::ToString() const {
std::string desc = absl::StrFormat(
"{output_feature_map_count: %d input_feature_map_count: %d "
"layout: %s shape: ",
output_feature_map_count(), input_feature_map_count(),
FilterLayoutString(layout()));
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&desc, "%d ", input_filter_dims()[i]);
}
absl::StrAppend(&desc, "}");
return desc;
}
std::string FilterDescriptor::ToShortString() const {
std::string od = absl::StrCat("od", output_feature_map_count());
std::string id = absl::StrCat("id", input_feature_map_count());
std::string spatial = "s";
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&spatial, "%d ", input_filter_dims()[i]);
}
switch (layout()) {
case FilterLayout::kOutputInputYX:
return absl::StrCat(od, id, spatial);
case FilterLayout::kOutputYXInput:
return absl::StrCat(od, spatial, id);
case FilterLayout::kOutputInputYX4:
case FilterLayout::kOutputInputYX32:
case FilterLayout::kOutputInputYX32_CudnnReordered:
return absl::StrCat(od, id, spatial, "(VECT_C)");
case FilterLayout::kInputYXOutput:
return absl::StrCat(id, spatial, od);
case FilterLayout::kYXInputOutput:
return absl::StrCat(spatial, id, od);
default:
LOG(FATAL) << "Unknown layout " << static_cast<int32_t>(layout());
return "";
}
}
int64_t FilterDescriptor::ComputeWeightCount() const {
int64_t ret = output_feature_map_count() * input_feature_map_count();
for (int i = 0; i < ndims(); i++) {
ret *= input_filter_dims()[i];
}
return ret;
}
std::vector<int64_t> FilterDescriptor::full_dims(
const FilterLayout& layout) const {
std::vector<int64_t> oiyx_dims(ndims() + 2);
oiyx_dims[0] = output_feature_map_count();
oiyx_dims[1] = input_feature_map_count();
std::copy(input_filter_dims().begin(), input_filter_dims().end(),
oiyx_dims.begin() + 2);
return ReorderDims(oiyx_dims, FilterLayout::kOutputInputYX, layout);
}
std::vector<int64_t> FilterDescriptor::full_strides(
const FilterLayout& layout) const {
std::vector<int64_t> phys_dims = full_dims(this->layout());
std::vector<int64_t> phys_strides(phys_dims.size());
phys_strides[ndims() + 1] = 1;
for (int i = ndims(); i >= 0; i--) {
phys_strides[i] = phys_strides[i + 1] * phys_dims[i + 1];
}
return ReorderDims(phys_strides, this->layout(), layout);
}
std::vector<int64_t> FilterDescriptor::vectorized_dims(
const FilterLayout& layout, int vector_size, int vector_dim) const {
std::vector<int64_t> oiyx_dims = full_dims(dnn::FilterLayout::kOutputInputYX);
if (vector_dim != -1) {
oiyx_dims[vector_dim] /= vector_size;
}
return ReorderDims(oiyx_dims, FilterLayout::kOutputInputYX, layout);
}
std::vector<int64_t> FilterDescriptor::vectorized_strides(
const FilterLayout& layout, int vector_size, int vector_dim) const {
std::vector<int64_t> phys_dims =
vectorized_dims(this->layout(), vector_size, vector_dim);
std::vector<int64_t> phys_strides(phys_dims.size());
phys_strides[phys_dims.size() - 1] = 1;
for (int i = phys_dims.size() - 2; i >= 0; i--) {
phys_strides[i] = phys_strides[i + 1] * phys_dims[i + 1];
}
return ReorderDims(phys_strides, this->layout(), layout);
}
TensorDescriptorProto FilterDescriptor::ToProto(DataType data_type) const {
TensorDescriptorProto ret = tensor_;
ret.set_data_type(data_type);
return ret;
}
ConvolutionDescriptor::ConvolutionDescriptor(int ndims) {
proto_.mutable_paddings()->Resize(ndims, 0);
proto_.mutable_strides()->Resize(ndims, 1);
proto_.mutable_dilations()->Resize(ndims, 1);
proto_.set_group_count(1);
proto_.set_convolution_mode(ConvolutionMode::CROSS_CORRELATION);
}
ConvolutionDescriptor::ConvolutionDescriptor()
: ConvolutionDescriptor(2) {}
ConvolutionDescriptor::~ConvolutionDescriptor() {}
std::string ConvolutionDescriptor::ToString() const {
std::string padding;
std::string strides;
std::string dilations;
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&padding, "%d ", this->padding()[i]);
absl::StrAppendFormat(&strides, "%d ", this->strides()[i]);
absl::StrAppendFormat(&dilations, "%d ", this->dilations()[i]);
}
return absl::StrFormat(
"{zero_padding: %s pad_alignment: %s filter_strides: %s dilation_rates: "
"%s}",
padding, PadAlignmentString(pad_alignment()), strides, dilations);
}
std::string ConvolutionDescriptor::ToShortString() const {
std::string desc;
for (int i = 0; i < ndims(); i++) {
if (i > 0) absl::StrAppend(&desc, "_");
absl::StrAppendFormat(&desc, "p%d:%d", i, padding()[i]);
}
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&desc, "_s%d:%d", i, strides()[i]);
}
for (int i = 0; i < ndims(); i++) {
absl::StrAppendFormat(&desc, "_d%d:%d", i, dilations()[i]);
}
return desc;
}
PoolingDescriptor::PoolingDescriptor(int ndims)
: mode_(dnn::PoolingMode::kMaximum),
ndims_(ndims),
propagate_nans_(false),
window_(ndims, 0),
padding_(ndims, 0),
strides_(ndims, 1) {}
PoolingDescriptor::PoolingDescriptor() : PoolingDescriptor(2) {}
void PoolingDescriptor::CloneFrom(const PoolingDescriptor& other) {
mode_ = other.mode_;
ndims_ = other.ndims_;
window_ = other.window_;
padding_ = other.padding_;
strides_ = other.strides_;
propagate_nans_ = other.propagate_nans_;
}
std::string PoolingDescriptor::ToString() const {
const char* mode_string =
mode_ == dnn::PoolingMode::kMaximum ? "kMaximum" : "kAverage";
std::string window, strides, padding;
for (int i = 0; i < ndims_; i++) {
absl::StrAppendFormat(&window, "%d ", window_[i]);
absl::StrAppendFormat(&strides, "%d ", strides_[i]);
absl::StrAppendFormat(&padding, "%d", padding_[i]);
}
const char* propagate_string = propagate_nans_ ? "Yes" : "No";
return absl::StrFormat(
"{mode: %s window: %s strides: %s padding: %s propagate NaNs: %s}",
mode_string, window, strides, padding, propagate_string);
}
std::string PoolingDescriptor::ToShortString() const {
std::string window, strides, padding;
for (int i = 0; i < ndims_; i++) {
absl::StrAppendFormat(&window, "_w%d:%d", i, window_[i]);
absl::StrAppendFormat(&strides, "_s%d:%d", i, strides_[i]);
absl::StrAppendFormat(&padding, "_p%d:%d", i, padding_[i]);
}
return absl::StrCat(mode_ == dnn::PoolingMode::kMaximum ? "max" : "avg",
window, strides, padding,
propagate_nans_ ? "propagate_nans" : "ignore_nans");
}
NormalizeDescriptor::NormalizeDescriptor()
: bias_(0.0),
range_(0),
alpha_(0.0),
beta_(0.0),
wrap_around_(false),
segment_size_(0) {}
void NormalizeDescriptor::CloneFrom(const NormalizeDescriptor& other) {
bias_ = other.bias_;
range_ = other.range_;
alpha_ = other.alpha_;
beta_ = other.beta_;
wrap_around_ = other.wrap_around_;
segment_size_ = other.segment_size_;
}
std::string NormalizeDescriptor::ToString() const {
return absl::StrFormat(
"{bias: %f range: %d alpha: %f beta: %f wrap_around: %d "
"segment_size: %d}",
bias_, range_, alpha_, beta_, wrap_around_, segment_size_);
}
std::string NormalizeDescriptor::ToShortString() const {
return absl::StrCat("bias:", bias_, "_range:", range_, "_alpha:", alpha_,
"_beta:", beta_, "_wrap:", wrap_around_,
"_size:", segment_size_);
}
bool DnnSupport::IsStatusOk(const absl::Status& status, bool report_error) {
if (status.ok()) {
return true;
}
if (report_error) {
LOG(ERROR) << status.message();
}
return false;
}
absl::Status DnnSupport::DoCtcLoss(
Stream* stream, dnn::DataType element_type,
const RnnStateTensorDescriptor& probs_desc,
const DeviceMemoryBase probs_data, absl::Span<const int> labels_data,
absl::Span<const int> labels_lengths_data,
absl::Span<const int> input_lengths_data, DeviceMemoryBase costs_data,
const RnnStateTensorDescriptor& grads_desc, DeviceMemoryBase grads_data,
DeviceMemory<uint8_t> scratch_memory, int ctc_loss_algo_id) {
return absl::UnimplementedError("CtcLoss not implemented");
}
}
} | #include "xla/stream_executor/dnn.h"
#include <tuple>
#include <vector>
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DnnTest, AlgorithmDescToString) {
dnn::AlgorithmDesc desc(17, {{12, 1}, {1, 0}, {3, 1}}, 0);
EXPECT_EQ(desc.ToString(), "eng17{k1=0,k3=1,k12=1}");
}
TEST(DnnTest, VersionInfoComparisonOperators) {
std::vector<std::tuple<int, int, int>> vs;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < 4; k++) {
vs.push_back(std::make_tuple(i, j, k));
}
}
}
for (const auto& a : vs) {
for (const auto& b : vs) {
auto [a1, a2, a3] = a;
auto [b1, b2, b3] = b;
dnn::VersionInfo va(a1, a2, a3);
dnn::VersionInfo vb(b1, b2, b3);
EXPECT_EQ((a == b), va == vb);
EXPECT_EQ((a != b), va != vb);
EXPECT_EQ((a < b), va < vb);
EXPECT_EQ((a <= b), va <= vb);
EXPECT_EQ((a > b), va > vb);
EXPECT_EQ((a >= b), va >= vb);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/dnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/dnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ebe25b54-ad49-46c8-a552-3bfae8fe7623 | cpp | tensorflow/tensorflow | device_memory_handle | third_party/xla/xla/stream_executor/device_memory_handle.cc | third_party/xla/xla/stream_executor/device_memory_handle_test.cc | #include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
DeviceMemoryHandle::DeviceMemoryHandle(StreamExecutor *executor,
DeviceMemoryBase memory)
: memory_(std::move(memory)), executor_(executor) {}
DeviceMemoryHandle::DeviceMemoryHandle(DeviceMemoryHandle &&other) noexcept
: memory_(std::move(other.memory_)), executor_(other.executor_) {
other.memory_ = DeviceMemoryBase();
}
DeviceMemoryHandle::~DeviceMemoryHandle() { Free(); }
void DeviceMemoryHandle::Free() {
if (!memory_.is_null()) {
executor_->Deallocate(&memory_);
}
}
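// Move assignment releases any memory this handle already owns before taking
// over the allocation held by `other`.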
DeviceMemoryHandle &DeviceMemoryHandle::operator=(
DeviceMemoryHandle &&other) noexcept {
Free();
memory_ = std::move(other.memory_);
other.memory_ = DeviceMemoryBase();
executor_ = other.executor_;
return *this;
}
} | #include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/mock_stream_executor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DeviceMemoryHandle, NullMemoryNoDeallocate) {
DeviceMemoryBase null_memory;
MockStreamExecutor executor;
EXPECT_CALL(executor, Deallocate).Times(0);
{ DeviceMemoryHandle releaser(&executor, null_memory); }
}
TEST(DeviceMemoryHandle, Deallocates) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{ DeviceMemoryHandle releaser(&executor, memory); }
}
TEST(DeviceMemoryHandle, MoveDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser_moved(std::move(releaser));
}
}
TEST(DeviceMemoryHandle, MoveAssignmentDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser2;
releaser2 = std::move(releaser);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_memory_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_memory_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e022bc24-e0e7-4a97-befe-c3cdf0fbf2d6 | cpp | tensorflow/tensorflow | kernel | tensorflow/lite/delegates/flex/kernel.cc | tensorflow/lite/delegates/flex/kernel_test.cc | #include "tensorflow/lite/delegates/flex/kernel.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/flex/delegate.h"
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_type.h"
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeAndType;
using tensorflow::shape_inference::ShapeHandle;
namespace tflite {
namespace flex {
constexpr char kReadVariableOp[] = "ReadVariableOp";
constexpr char kInterOpParallelismAttrName[] = "use_inter_op_parallelism";
struct OpNode;
struct TensorSource {
OpNode* node;
int node_output_index;
};
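// Tracks the TfLite tensor indices consumed by a node, the node (if any) that
// produces each input, and whether each input buffer may be forwarded (moved)
// from its producer instead of copied.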
class OpInputs {
public:
explicit OpInputs(const TfLiteIntArray* indexes) {
for (int index : TfLiteIntArrayView(indexes)) {
inputs_.push_back(index);
}
forwardable_.resize(inputs_.size());
}
~OpInputs() = default;
int Size() const { return inputs_.size(); }
int TfLiteIndex(int i) const { return inputs_[i]; }
void InitializeTensorSources(
const std::map<int, TensorSource>& tflite_tensor_sources) {
sources_.clear();
for (int i : inputs_) {
auto it = tflite_tensor_sources.find(i);
if (it == tflite_tensor_sources.end()) {
sources_.push_back({nullptr, 0});
} else {
sources_.push_back(it->second);
}
}
}
void SetForwardable(int i, bool v) { forwardable_[i] = v; }
bool IsForwardable(int i) const { return forwardable_[i]; }
TensorSource GetTensorSource(int i) const { return sources_[i]; }
private:
std::vector<int> inputs_;
std::vector<TensorSource> sources_;
std::vector<int> forwardable_;
};
class OpOutputs {
public:
explicit OpOutputs(const TfLiteIntArray* indexes) {
for (int index : TfLiteIntArrayView(indexes)) {
outputs_.push_back(index);
}
vector_.resize(outputs_.size());
}
~OpOutputs() = default;
void InitializeGraphOutputs(const std::set<int>& subgraph_outputs) {
subgraph_outputs_.clear();
for (int i : outputs_) {
subgraph_outputs_.push_back(subgraph_outputs.count(i) > 0);
}
}
bool IsSubgraphOutput(int i) const { return subgraph_outputs_[i]; }
const tensorflow::Tensor& GetTensor(int i) const { return vector_[i]; }
tensorflow::Tensor ReleaseTensor(int i) { return std::move(vector_[i]); }
int Size() const { return outputs_.size(); }
int TfLiteIndex(int i) const { return outputs_[i]; }
absl::InlinedVector<tensorflow::Tensor, 2UL>* GetTensors() {
return &vector_;
}
private:
std::vector<int> outputs_;
std::vector<bool> subgraph_outputs_;
absl::InlinedVector<tensorflow::Tensor, 2UL> vector_;
};
struct OpDataInfo {
BufferMap* buffer_map;
std::map<int, int>* tensor_release_map;
std::set<int> already_transferred_outputs;
};
class OpNode {
public:
OpNode(const TfLiteIntArray* inputs, const TfLiteIntArray* outputs)
: inputs_(inputs), outputs_(outputs) {}
~OpNode() = default;
const string& name() const { return name_; }
void set_name(const string& name) { name_ = name; }
int index() const { return index_; }
void set_index(int index) { index_ = index; }
const tensorflow::NodeDef& nodedef() const { return nodedef_; }
const tensorflow::OpRegistrationData* op_reg_data() const {
return op_reg_data_;
}
const OpInputs& inputs() const { return inputs_; }
OpInputs* mutable_inputs() { return &inputs_; }
const OpOutputs& outputs() const { return outputs_; }
OpOutputs* mutable_outputs() { return &outputs_; }
int NumInputs() const { return inputs_.Size(); }
int NumOutputs() const { return outputs_.Size(); }
const tensorflow::tfrt_stub::OpKernelRunner& op_kernel_runner() const {
return op_kernel_runner_;
}
tensorflow::Status InitializeNodeDef(const void* custom_initial_data,
int custom_initial_data_size) {
if (!custom_initial_data) {
return tensorflow::errors::Internal(
"Cannot convert empty data into a valid NodeDef");
}
const flexbuffers::Vector& v =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(custom_initial_data),
custom_initial_data_size)
.AsVector();
name_ = v[0].AsString().str();
if (!nodedef_.ParseFromString(v[1].AsString().str())) {
nodedef_.Clear();
return tensorflow::errors::Internal(
"Failed to parse data into a valid NodeDef");
}
TF_RETURN_IF_ERROR(
tensorflow::OpRegistry::Global()->LookUp(nodedef_.op(), &op_reg_data_));
AddDefaultsToNodeDef(op_reg_data_->op_def, &nodedef_);
const auto& op_def = op_reg_data_->op_def;
for (const auto& attr : op_def.attr()) {
if (attr.name() == kInterOpParallelismAttrName) {
(*nodedef_.mutable_attr())[kInterOpParallelismAttrName].set_b(false);
break;
}
}
return absl::OkStatus();
}
tensorflow::Status BuildOpKernelRunner(
tensorflow::EagerContext* eager_context) {
TF_ASSIGN_OR_RETURN(op_kernel_runner_,
tensorflow::tfrt_stub::OpKernelRunner::Create(
name_, inputs_.Size(),
[this](tensorflow::AttrValueMap* attr_value_map) {
*attr_value_map = nodedef_.attr();
return absl::OkStatus();
},
*eager_context->pflr(),
eager_context->local_device_mgr()->HostCPU()));
return absl::OkStatus();
}
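  // Collects the TF tensors for this node's inputs: subgraph inputs are read
  // from the buffer map, intermediate values from the producing node's
  // outputs (moved when the input is forwardable).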
tensorflow::Status BuildOpKernelInputs(
const BufferMap* buffer_map,
tensorflow::tfrt_stub::OpKernelRunState* run_state) {
run_state->input_tf_tensors.resize(inputs_.Size());
run_state->input_tf_tensor_values.resize(inputs_.Size());
for (int i = 0; i < inputs_.Size(); ++i) {
int input_index = inputs_.TfLiteIndex(i);
TensorSource s = inputs_.GetTensorSource(i);
if (!s.node) {
if (!buffer_map->HasTensor(input_index)) {
return tensorflow::errors::Internal(
"Cannot read from invalid tensor index ", input_index);
}
run_state->input_tf_tensors[i] = buffer_map->GetTensor(input_index);
} else {
if (inputs_.IsForwardable(i)) {
run_state->input_tf_tensors[i] =
s.node->outputs_.ReleaseTensor(s.node_output_index);
} else {
run_state->input_tf_tensors[i] =
s.node->outputs_.GetTensor(s.node_output_index);
}
}
run_state->input_tf_tensor_values[i].tensor =
&run_state->input_tf_tensors[i];
}
return absl::OkStatus();
}
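  // An output stays as a TF tensor in the buffer map when it is a
  // resource/variant or string tensor, or when a later node still consumes
  // it; otherwise it is copied back into the TfLite tensor.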
bool ShouldPersistTensorflowTensor(TfLiteContext* context,
const OpDataInfo* shared_info,
int tensor_index, int node_index) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (IsResourceOrVariant(tensor) || tensor->type == kTfLiteString) {
return true;
}
auto it = shared_info->tensor_release_map->find(tensor_index);
return it != shared_info->tensor_release_map->end() &&
it->second > node_index;
}
TfLiteStatus CopyToTfLiteTensor(TfLiteContext* context,
OpDataInfo* shared_info, TfLiteTensor* tensor,
tensorflow::Tensor* tf_tensor,
int tensor_index) const {
if (tensor->allocation_type == kTfLiteDynamic) {
CopyShapeAndType(context, *tf_tensor, tensor);
}
tensorflow::StringPiece t_data = tf_tensor->tensor_data();
if (tf_tensor->NumElements() != NumElements(tensor) ||
tf_tensor->TotalBytes() != tensor->bytes) {
TF_LITE_KERNEL_LOG(context,
"FlexDelegate: Tensor %s(%d) buffer size mismatch "
"%zu(%lld) != %ld(%ld)",
tensor->name, tensor_index, tf_tensor->TotalBytes(),
tf_tensor->NumElements(), tensor->bytes,
NumElements(tensor));
return kTfLiteError;
}
memcpy(tensor->data.raw, t_data.data(), t_data.size());
*tf_tensor = {};
shared_info->already_transferred_outputs.insert(tensor_index);
return kTfLiteOk;
}
tensorflow::Status MaybePersistTensorflowOutputs(TfLiteContext* context,
OpDataInfo* shared_info,
int node_index) {
auto* tensors = outputs_.GetTensors();
for (int i = 0; i < outputs_.Size(); ++i) {
if (outputs_.IsSubgraphOutput(i)) {
tensorflow::Tensor& tf_tensor = tensors->at(i);
const int tflite_index = outputs_.TfLiteIndex(i);
TfLiteTensor* tensor = &context->tensors[tflite_index];
if (!ShouldPersistTensorflowTensor(context, shared_info, tflite_index,
node_index)) {
if (CopyToTfLiteTensor(context, shared_info, tensor, &tf_tensor,
tflite_index) != kTfLiteOk) {
return tensorflow::Status(absl::StatusCode::kInternal,
"failed to copy data from TF tensor");
}
} else {
shared_info->buffer_map->SetFromTensorFlow(outputs_.TfLiteIndex(i),
tf_tensor);
}
}
}
return absl::OkStatus();
}
private:
OpNode(const OpNode&) = delete;
OpNode& operator=(const OpNode&) = delete;
string name_;
int index_;
tensorflow::NodeDef nodedef_;
const tensorflow::OpRegistrationData* op_reg_data_;
OpInputs inputs_;
OpOutputs outputs_;
tensorflow::tfrt_stub::OpKernelRunner op_kernel_runner_;
};
struct OpData {
tensorflow::EagerContext* eager_context;
tensorflow::CancellationManager* cancellation_manager;
std::vector<std::unique_ptr<OpNode>> nodes;
std::vector<int> subgraph_inputs;
std::vector<int> subgraph_outputs;
  std::set<int> disable_reusing_buffer_tensors;
OpDataInfo shared_info;
};
tensorflow::Status DelegateKernel::ExecuteOpKernelRunner(
tensorflow::tfrt_stub::OpKernelRunState* run_state, TfLiteContext* context,
OpNode* node_data) {
const auto& op_kernel_runner = node_data->op_kernel_runner();
if (op_kernel_runner.op_kernel()->num_outputs() != node_data->NumOutputs()) {
return tensorflow::errors::Internal(
"Unexpected number of outputs from tensorflow::OpKernel");
}
TF_RETURN_IF_ERROR(node_data->BuildOpKernelInputs(
op_data_->shared_info.buffer_map, run_state));
run_state->params.inputs = run_state->input_tf_tensor_values;
run_state->params.op_kernel = op_kernel_runner.op_kernel();
run_state->params.input_alloc_attrs = op_kernel_runner.input_alloc_attrs();
run_state->params.output_attr_array =
op_kernel_runner.output_alloc_attrs().data();
run_state->params.function_library =
op_kernel_runner.function_library_runtime();
tensorflow::OpKernelContext tf_context(&run_state->params,
node_data->NumOutputs());
op_kernel_runner.Run(&tf_context);
TF_RETURN_IF_ERROR(tf_context.status());
auto& outputs = *node_data->mutable_outputs()->GetTensors();
for (int i = 0; i < tf_context.num_outputs(); ++i) {
outputs[i] = std::move(*tf_context.mutable_output(i));
}
return node_data->MaybePersistTensorflowOutputs(
context, &(op_data_->shared_info), node_data->index());
}
DelegateKernel::DelegateKernel() : op_data_(new OpData) {}
DelegateKernel::~DelegateKernel() = default;
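// Parses each delegated node's NodeDef, builds its OpKernel runner, records
// subgraph inputs/outputs, and tracks the last node that reads each tensor so
// intermediate buffers can be released as early as possible.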
TfLiteStatus DelegateKernel::Init(TfLiteContext* context,
const TfLiteDelegateParams* params) {
auto* flex_delegate_data =
reinterpret_cast<FlexDelegate*>(params->delegate->data_)->mutable_data();
op_data_->eager_context = flex_delegate_data->GetEagerContext();
op_data_->cancellation_manager = flex_delegate_data->GetCancellationManager();
op_data_->shared_info.buffer_map = flex_delegate_data->GetBufferMap(context);
op_data_->shared_info.tensor_release_map =
flex_delegate_data->GetTensorReleaseMap(context);
CHECK(params->output_tensors);
std::set<int> output_set;
for (auto tensor_index : TfLiteIntArrayView(params->output_tensors)) {
op_data_->subgraph_outputs.push_back(tensor_index);
output_set.insert(tensor_index);
}
CHECK(params->input_tensors);
for (auto tensor_index : TfLiteIntArrayView(params->input_tensors)) {
op_data_->subgraph_inputs.push_back(tensor_index);
}
std::set<int> subgraph_inputs(op_data_->subgraph_inputs.begin(),
op_data_->subgraph_inputs.end());
op_data_->nodes.reserve(params->nodes_to_replace->size);
CHECK(params->nodes_to_replace);
tensorflow::Status status;
auto check_if_op_reuses_input = [](const string& op_name) {
return op_name == "TensorListPushBack" || op_name == "TensorListSetItem" ||
op_name == "SparseReshape" || op_name == "StridedSlice" ||
op_name == "RaggedTensorToVariant" || op_name == "TensorMapInsert";
};
for (auto node_index : TfLiteIntArrayView(params->nodes_to_replace)) {
TfLiteNode* node;
TfLiteRegistration* reg;
context->GetNodeAndRegistration(context, node_index, &node, ®);
op_data_->nodes.emplace_back(new OpNode(node->inputs, node->outputs));
OpNode& node_data = *op_data_->nodes.back();
node_data.set_index(node_index);
node_data.set_name("");
status = node_data.InitializeNodeDef(node->custom_initial_data,
node->custom_initial_data_size);
if (!status.ok()) break;
status = node_data.BuildOpKernelRunner(op_data_->eager_context);
if (!status.ok()) break;
for (auto tensor_index : TfLiteIntArrayView(node->inputs)) {
int node_id = node_index;
if (const std::map<int, int>::iterator it =
op_data_->shared_info.tensor_release_map->find(tensor_index);
it != op_data_->shared_info.tensor_release_map->end()) {
node_id = std::max(it->second, node_index);
}
(*op_data_->shared_info.tensor_release_map)[tensor_index] = node_id;
if (subgraph_inputs.count(tensor_index) &&
check_if_op_reuses_input(node_data.nodedef().op())) {
op_data_->disable_reusing_buffer_tensors.insert(tensor_index);
}
}
}
TF_LITE_ENSURE_STATUS(ConvertStatus(context, status));
std::map<int, TensorSource> tflite_tensor_sources;
for (auto& node_data : op_data_->nodes) {
node_data->mutable_outputs()->InitializeGraphOutputs(output_set);
for (int i = 0; i < node_data->outputs().Size(); ++i) {
int output_index = node_data->outputs().TfLiteIndex(i);
tflite_tensor_sources[output_index] = TensorSource{node_data.get(), i};
}
}
for (auto& node_data : op_data_->nodes) {
node_data->mutable_inputs()->InitializeTensorSources(tflite_tensor_sources);
}
return kTfLiteOk;
}
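// Prepare() seeds the BufferMap with constant inputs, validates that inferred
// output shapes match the TfLite tensors (marking outputs dynamic otherwise),
// and flags inputs that may be forwarded because they have a single consumer.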
TfLiteStatus DelegateKernel::Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_MSG(
context, op_data_->eager_context != nullptr,
"Failed to initialize eager context. This often happens when a CPU "
"device has not been registered, presumably because some symbols from "
"tensorflow/core:core_cpu_impl were not linked into the binary.");
std::map<int, int> tensor_ref_count;
BufferMap* buffer_map = op_data_->shared_info.buffer_map;
for (auto tensor_index : op_data_->subgraph_inputs) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (IsConstantTensor(tensor)) {
if (!tensor->data_is_stale || !buffer_map->HasTensor(tensor_index)) {
buffer_map->SetFromTfLite(tensor_index, tensor);
}
}
tensor_ref_count[tensor_index] += 2;
}
if (shapes_are_valid_) {
shapes_are_valid_ =
(ValidateOutputTensorShapeConsistency(context) == kTfLiteOk);
if (shapes_are_valid_) {
TFLITE_LOG(tflite::TFLITE_LOG_INFO,
"FlexDelegate: All tensor shapes are consistent.");
} else {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"FlexDelegate: Some tensor shapes are inconsistent.");
}
}
for (auto tensor_index : op_data_->subgraph_outputs) {
if (!shapes_are_valid_) {
SetTensorToDynamic(&context->tensors[tensor_index]);
}
++tensor_ref_count[tensor_index];
}
for (const auto& node_data : op_data_->nodes) {
if (node_data->nodedef().op().empty()) {
TF_LITE_KERNEL_LOG(context, "Invalid NodeDef in Flex op '%s'",
node_data->name().c_str());
return kTfLiteError;
}
TF_LITE_ENSURE(context, node_data->op_kernel_runner());
for (int i = 0; i < node_data->inputs().Size(); ++i) {
++tensor_ref_count[node_data->inputs().TfLiteIndex(i)];
}
}
for (auto& node_data : op_data_->nodes) {
for (int i = 0; i < node_data->inputs().Size(); ++i) {
bool f = (tensor_ref_count[node_data->inputs().TfLiteIndex(i)] == 1);
node_data->mutable_inputs()->SetForwardable(i, f);
}
}
return kTfLiteOk;
}
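// Runs TensorFlow shape inference for every wrapped node and compares the
// inferred output shapes against the shapes of the TfLite output tensors.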
TfLiteStatus DelegateKernel::ValidateOutputTensorShapeConsistency(
TfLiteContext* context) const {
for (const auto& node_data : op_data_->nodes) {
auto op_name = node_data->name().c_str();
auto num_inputs = node_data->inputs().Size();
std::vector<const tensorflow::Tensor*> input_tensors_vector(num_inputs,
nullptr);
InferenceContext c(
TF_GRAPH_DEF_VERSION, node_data->nodedef(),
node_data->op_reg_data()->op_def, std::vector<ShapeHandle>(num_inputs),
input_tensors_vector, {},
std::vector<std::unique_ptr<std::vector<ShapeAndType>>>());
for (int i = 0; i < num_inputs; ++i) {
const auto input_tensor_index = node_data->inputs().TfLiteIndex(i);
TfLiteTensor* tfl_tensor = &context->tensors[input_tensor_index];
if (IsConstantTensor(tfl_tensor)) {
input_tensors_vector[i] =
op_data_->shared_info.buffer_map->GetTensorPtr(input_tensor_index);
}
const auto dims_array = tfl_tensor->dims;
std::vector<DimensionHandle> dims(dims_array->size);
for (int j = 0; j < dims_array->size; ++j) {
dims[j] = c.MakeDim(dims_array->data[j]);
}
c.SetInput(i, c.MakeShape(dims));
}
c.set_input_tensors(input_tensors_vector);
tensorflow::Status status = c.construction_status();
if (!status.ok()) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"Shape construction failed for op '%s'", op_name);
return kTfLiteError;
}
if (node_data->op_reg_data()->shape_inference_fn == nullptr) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"No shape inference function exists for op '%s'", op_name);
return kTfLiteError;
}
status = c.Run(node_data->op_reg_data()->shape_inference_fn);
auto num_outputs = node_data->outputs().Size();
if (num_outputs != c.num_outputs()) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"Number of output tensors are mismatched for op '%s' %d != %d",
op_name, num_outputs, c.num_outputs());
return kTfLiteError;
}
for (int i = 0; i < num_outputs; ++i) {
const auto output_tensor_index = node_data->outputs().TfLiteIndex(i);
TfLiteTensor* tfl_tensor = &context->tensors[output_tensor_index];
const std::string tfl_shape_string =
GetShapeDebugString(tfl_tensor->dims);
const std::string calculated_shape_string = c.DebugString(c.output(i));
if (tfl_shape_string != calculated_shape_string) {
if ((strcmp(op_name, kReadVariableOp) == 0) &&
(tfl_tensor->dims->size > 0)) {
continue;
}
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"op '%s' output%d tensor#%d shape mismatch for %s != %s",
op_name, i, output_tensor_index, tfl_shape_string.c_str(),
calculated_shape_string.c_str());
return kTfLiteError;
}
}
}
return kTfLiteOk;
}
static tensorflow::CancellationManager* GetDefaultCancellationManager() {
static auto* const cancellation_manager = new tensorflow::CancellationManager;
return cancellation_manager;
}
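// Eval() refreshes non-constant inputs in the BufferMap, runs every wrapped
// TensorFlow node in order, and copies or aliases the results back into the
// TfLite output tensors.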
TfLiteStatus DelegateKernel::Eval(TfLiteContext* context, TfLiteNode* node) {
BufferMap* buffer_map = op_data_->shared_info.buffer_map;
for (auto tensor_index : op_data_->subgraph_inputs) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (!IsConstantTensor(tensor)) {
if (!tensor->data_is_stale || !buffer_map->HasTensor(tensor_index)) {
buffer_map->SetFromTfLite(
tensor_index, tensor,
!op_data_->disable_reusing_buffer_tensors.count(tensor_index));
}
}
}
auto& eager_context = *op_data_->eager_context;
{
tensorflow::tfrt_stub::OpKernelRunState run_state;
run_state.params.step_container = eager_context.StepContainer();
auto* device = eager_context.local_device_mgr()->HostCPU();
run_state.params.device = device;
run_state.params.resource_manager = device->resource_manager();
run_state.params.runner = eager_context.runner();
run_state.params.cancellation_manager =
op_data_->cancellation_manager ? op_data_->cancellation_manager
: GetDefaultCancellationManager();
for (auto& node_data : op_data_->nodes) {
TFLITE_SCOPED_DELEGATE_PROFILED_OPERATOR_PROFILE(
reinterpret_cast<Profiler*>(context->profiler),
node_data->name().c_str(), node_data->index());
if (op_data_->cancellation_manager != nullptr &&
op_data_->cancellation_manager->IsCancelled()) {
TF_LITE_KERNEL_LOG(
context, "Client requested cancel during DelegateKernel::Eval");
return kTfLiteError;
}
auto status = ExecuteOpKernelRunner(&run_state, context, node_data.get());
TF_LITE_ENSURE_OK(context, ConvertStatus(context, status));
}
}
for (auto tensor_index : op_data_->subgraph_outputs) {
if (op_data_->shared_info.already_transferred_outputs.count(tensor_index) !=
0) {
continue;
}
if (!buffer_map->HasTensor(tensor_index)) {
TF_LITE_KERNEL_LOG(context, "Cannot write to invalid tensor index %d",
tensor_index);
return kTfLiteError;
}
TfLiteTensor* tensor = &context->tensors[tensor_index];
const tensorflow::Tensor& tf_tensor = buffer_map->GetTensor(tensor_index);
if (tensor->allocation_type == kTfLiteDynamic) {
TF_LITE_ENSURE_OK(context, CopyShapeAndType(context, tf_tensor, tensor));
tensor->buffer_handle = tensor_index;
tensor->data_is_stale = true;
continue;
}
if (tf_tensor.NumElements() != NumElements(tensor) ||
tf_tensor.TotalBytes() != tensor->bytes) {
TF_LITE_KERNEL_LOG(context,
"FlexDelegate: Tensor %s(%d) buffer size mismatch "
"%zu(%lld) != %ld(%ld)",
tensor->name, tensor_index, tf_tensor.TotalBytes(),
tf_tensor.NumElements(), tensor->bytes,
NumElements(tensor));
return kTfLiteError;
}
tensorflow::StringPiece t_data = tf_tensor.tensor_data();
memcpy(tensor->data.raw, t_data.data(), t_data.size());
}
return kTfLiteOk;
}
const std::map<int, int>& DelegateKernel::GetTensorReleaseMap() const {
return *(op_data_->shared_info.tensor_release_map);
}
}
} | #include "tensorflow/lite/delegates/flex/kernel.h"
#include <functional>
#include <initializer_list>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/flex/delegate.h"
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include "tensorflow/lite/delegates/flex/test_util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace flex {
namespace testing {
using ::testing::ContainsRegex;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
class TestFlexDelegate : public FlexDelegate {
protected:
bool IsNodeSupportedByDelegate(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) const override {
return true;
}
};
class KernelTest : public testing::FlexModelTest {
public:
static constexpr int kOnes = 1;
static constexpr int kTwos = 2;
static constexpr int kMaxTensors = 30;
KernelTest() {
interpreter_ = std::make_unique<Interpreter>(&error_reporter_);
}
void ApplyFlexDelegate(std::unique_ptr<FlexDelegate> delegate = nullptr) {
auto flex_delegate = FlexDelegate::Create(std::move(delegate));
delegate_data_ =
reinterpret_cast<FlexDelegate*>(flex_delegate->data_)->mutable_data();
CHECK(delegate_data_->Prepare(tensorflow::SessionOptions{}).ok());
CHECK(interpreter_->ModifyGraphWithDelegate(std::move(flex_delegate)) ==
kTfLiteOk);
}
const std::map<int, int>& GetTensorReleaseMap(DelegateKernel* kernel) {
return kernel->GetTensorReleaseMap();
}
protected:
tflite::flex::DelegateData* delegate_data_;
};
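// Builds an all-Flex graph (two Unpacks, two Adds, one Mul) and verifies the
// results across two invocations with different input shapes.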
TEST_F(KernelTest, FullGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
SetShape(0, {2, 3, 1});
SetValues(0, {2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f});
SetShape(3, {2, 3, 1});
SetValues(3, {2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(3, 1));
ASSERT_THAT(GetValues(8), ElementsAre(24.0f, 32.0f, 48.0f));
}
TEST_F(KernelTest, ValidateTensorReleaseMap) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ApplyFlexDelegate();
const int node_size = interpreter_->primary_subgraph().nodes_size();
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
interpreter_->primary_subgraph().node_and_registration(node_size - 1);
DelegateKernel* delegate_kernel =
reinterpret_cast<DelegateKernel*>(node_and_reg->first.user_data);
const auto& tensor_release_map = GetTensorReleaseMap(delegate_kernel);
EXPECT_THAT(
tensor_release_map,
UnorderedElementsAre(Pair(0, 0), Pair(1, 2), Pair(2, 3), Pair(3, 1),
Pair(4, 2), Pair(5, 3), Pair(6, 4), Pair(7, 4)));
}
TEST_F(KernelTest, PersistEagerTensor) {
AddTensors(10, {0, 3}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
AddTfOp(testing::kAdd, {6, 8}, {9});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
auto* buffer_map =
delegate_data_->GetBufferMap(interpreter_->primary_subgraph().context());
EXPECT_TRUE(buffer_map->HasTensor(6));
EXPECT_FALSE(buffer_map->HasTensor(7));
}
TEST_F(KernelTest, BadTensorFlowOp) {
AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
AddTfOp(testing::kNonExistent, {0}, {1});
ApplyFlexDelegate(std::unique_ptr<FlexDelegate>(new TestFlexDelegate()));
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Op type not registered 'NonExistentOp'"));
}
TEST_F(KernelTest, BadNumberOfOutputs) {
AddTensors(3, {0}, {1, 2}, kTfLiteFloat32, {3});
AddTfOp(testing::kIdentity, {0}, {1, 2});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_FALSE(Invoke());
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Unexpected number of outputs"));
}
TEST_F(KernelTest, IncompatibleNodeDef) {
AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
AddTfOp(testing::kIncompatibleNodeDef, {0}, {1});
ApplyFlexDelegate();
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("No attr named 'SrcT' in NodeDef"));
}
TEST_F(KernelTest, WrongSetOfNodes) {
AddTensors(4, {0}, {3}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfLiteMulOp({1, 2}, {3});
ApplyFlexDelegate(std::unique_ptr<FlexDelegate>(new TestFlexDelegate()));
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Cannot convert empty data into a valid NodeDef"));
}
TEST_F(KernelTest, MixedGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
TEST_F(KernelTest, SplitGraph) {
std::vector<float> a = {3.0f, 1.0f, 0.5f, -1.0f, 4.0f, -1.0f, -2.0f, 5.0f};
std::vector<float> b = {0.0f, 1.0f, 1.5f, 3.0f};
AddTensors(18, {0, 1}, {17}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {2, 10});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
AddTfOp(testing::kUnpack, {10}, {11, 12});
AddTfOp(testing::kAdd, {11, 12}, {13});
AddTfOp(testing::kUnpack, {13}, {14, 15});
AddTfOp(testing::kAdd, {14, 15}, {16});
AddTfOp(testing::kAdd, {9, 16}, {17});
ApplyFlexDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, a);
SetShape(1, {2, 2, 1});
SetValues(1, b);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(17), ElementsAre(1));
ASSERT_THAT(GetValues(17), ElementsAre(16.0f));
SetShape(0, {2, 2, 2, 1});
SetValues(0, {4.0f, 1.0f, 1.5f, -2.0f, 2.0f, 0.0f, -2.0f, 3.0f});
SetShape(1, {2, 2, 1});
SetValues(1, {0.0f, 2.0f, 1.5f, 3.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(17), ElementsAre(1));
ASSERT_THAT(GetValues(17), ElementsAre(18.0f));
}
class MultipleSubgraphsTest : public KernelTest {
public:
static constexpr int kInput = 0;
void PrepareInterpreter(const std::vector<float>& input) {
ApplyFlexDelegate();
SetShape(kOnes, {3});
SetValues(kOnes, {1.0f, 1.0f, 1.0f});
SetShape(kTwos, {3});
SetValues(kTwos, {2.0f, 2.0f, 2.0f});
SetValues(kInput, input);
}
std::vector<float> Apply(const std::vector<float>& input,
std::function<float(float)> function) {
std::vector<float> result;
for (float f : input) {
result.push_back(function(f));
}
return result;
}
};
TEST_F(MultipleSubgraphsTest, ForwardabilityIsLocal) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {11, 10}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (4 * in + 4) * (in + 1);
})));
}
TEST_F(MultipleSubgraphsTest, DoNotRemoveInputTensors) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfOp(testing::kAdd, {10, kOnes}, {15});
AddTfOp(testing::kAdd, {10, kOnes}, {16});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {10, 11}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (4 * in + 4) * (in + 1);
})));
}
TEST_F(MultipleSubgraphsTest, DoNotForwardInputTensors) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {11, 4}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (5 * in + 5) * (in + 1);
})));
}
tensorflow::OpDef MakeOpDef(int num_inputs, int num_outputs) {
tensorflow::OpRegistrationData op_reg_data;
tensorflow::OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(tensorflow::strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(tensorflow::strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
tensorflow::PartialTensorShape S(std::initializer_list<int64_t> dims) {
return tensorflow::PartialTensorShape(dims);
}
TEST(ValidateOutputTensorShapeConsistencyTest, ShapeHandleDebugString) {
tensorflow::OpDef op_def = MakeOpDef(4, 1);
tensorflow::NodeDef def;
tensorflow::shape_inference::InferenceContext c(
0, def, op_def, {S({1}), S({2, 3}), S({4, 5, 6}), {}}, {}, {}, {});
c.SetInput(3, c.UnknownShape());
std::vector<tensorflow::shape_inference::ShapeHandle> shapes;
EXPECT_EQ("[1]", c.DebugString(c.input(0)));
EXPECT_EQ("[2,3]", c.DebugString(c.input(1)));
EXPECT_EQ("[4,5,6]", c.DebugString(c.input(2)));
EXPECT_EQ("?", c.DebugString(c.input(3)));
}
TEST(ValidateOutputTensorShapeConsistencyTest, GetShapeDebugString) {
TfLiteIntArray* dims1 = TfLiteIntArrayCreate(1);
dims1->data[0] = 1;
EXPECT_EQ("[1]", GetShapeDebugString(dims1));
TfLiteIntArrayFree(dims1);
TfLiteIntArray* dims2 = TfLiteIntArrayCreate(2);
dims2->data[0] = 2;
dims2->data[1] = 3;
EXPECT_EQ("[2,3]", GetShapeDebugString(dims2));
TfLiteIntArrayFree(dims2);
TfLiteIntArray* dims3 = TfLiteIntArrayCreate(3);
dims3->data[0] = 4;
dims3->data[1] = 5;
dims3->data[2] = 6;
EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3));
TfLiteIntArrayFree(dims3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d81c2368-f61b-45b4-9f04-9d2cbe7d1611 | cpp | tensorflow/tensorflow | gpu_cudamallocasync_allocator | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator.cc | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator_test.cc | #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace stream_executor {
struct GpuCudaMallocAsyncAllocator::CudaState {
CUstream cuda_stream{};
CUmemoryPool pool{};
};
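// Logs a histogram of the live allocations together with the CUDA memory
// pool's reserved/used counters; logged when an allocation fails and stats
// are enabled.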
void GpuCudaMallocAsyncAllocator::PrintAllocatorStatisticsNoLock() {
std::map<size_t, int> size_map_histogram;
std::vector<std::string> ptr_size_string;
for (auto p : size_map_) {
if (VLOG_IS_ON(8)) {
ptr_size_string.push_back(
absl::StrCat("(", absl::Hex(p.first), ",", p.second, ")"));
}
size_map_histogram[p.second]++;
}
LOG(ERROR) << "Histogram of current allocation: (allocation_size_in_bytes, "
<< "nb_allocation_of_that_sizes), ...;";
for (auto p : size_map_histogram) {
LOG(ERROR) << p.first << ", " << p.second;
}
VLOG(8) << "\nThe sorted list of (ptr,size):";
VLOG(8) << absl::StrJoin(ptr_size_string, ",");
cuuint64_t mem_reserved_current;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,
&mem_reserved_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_used_current;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_USED_MEM_CURRENT,
&mem_used_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_reserved_high;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,
&mem_reserved_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_used_high;
if (auto result = cuMemPoolGetAttribute(
cuda_state_->pool, CU_MEMPOOL_ATTR_USED_MEM_HIGH, &mem_used_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: "
<< mem_reserved_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_CURRENT: " << mem_used_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: " << mem_reserved_high;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_HIGH: " << mem_used_high;
}
std::atomic<int> GpuCudaMallocAsyncAllocator::number_instantiated_(0);
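// Main constructor: verifies that the driver and device support
// cudaMallocAsync, creates (or fetches the default) memory pool, sets its
// release threshold, and exchanges peer access with every pool created so far.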
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, bool create_new_pool,
size_t new_pool_size, bool reserve_memory, size_t reserve_memory_size,
bool sync_mode, bool compute_stats)
: cuda_state_{std::make_unique<CudaState>()},
name_(absl::StrCat("gpu_async_", platform_device_id.value())),
reserve_memory_(reserve_memory),
create_new_pool_(create_new_pool),
sync_mode_(sync_mode) {
++number_instantiated_;
stream_exec_ = GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
int driverVersion;
cuDriverGetVersion(&driverVersion);
VLOG(2) << "DRIVER VERSION: " << driverVersion;
if (driverVersion < 11020) {
LOG(FATAL)
<< "Disable cuda_malloc_async or update your CUDA driver to a version"
<< " compatible with CUDA 11.2 or higher."
<< " We detected a version compatible with: " << driverVersion;
}
if (platform_device_id.value() > 0 && driverVersion < 11030) {
CUcontext pctx;
if (auto result = cuDevicePrimaryCtxRetain(&pctx, 0))
LOG(FATAL)
<< "Failed to retain context: " << cuda::ToStatus(result);
}
gpu::ScopedActivateContext scoped_activation{stream_exec_};
if (auto status2 = cuDriverGetVersion(&driverVersion)) {
LOG(FATAL)
<< "Error while fetching driver version: " << cuda::ToStatus(status2);
}
int cuda_malloc_async_supported;
if (auto status =
cuDeviceGetAttribute(&cuda_malloc_async_supported,
CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED,
platform_device_id.value())) {
LOG(FATAL)
<< "On device: " << platform_device_id.value()
<< " Current driver: " << driverVersion
<< ". Failed to get device attribute : " << cuda::ToStatus(status);
}
if (!cuda_malloc_async_supported)
LOG(FATAL)
<< "TF_GPU_ALLOCATOR=cuda_malloc_async isn't currently supported on "
<< "GPU id " << platform_device_id.value() << ":"
<< " Possible causes: device not supported (request SM60+), driver too "
"old, "
<< " OS not supported, CUDA version too old(request CUDA11.2+).";
size_t pool_size;
if (create_new_pool_) {
pool_size = new_pool_size;
CUmemPoolProps pool_props;
memset(reinterpret_cast<void*>(&pool_props), 0, sizeof(pool_props));
pool_props.allocType = CU_MEM_ALLOCATION_TYPE_PINNED;
pool_props.handleTypes = CU_MEM_HANDLE_TYPE_NONE;
pool_props.location.id = platform_device_id.value();
pool_props.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
#if CUDA_VERSION >= 12030
pool_props.maxSize = new_pool_size;
#endif
if (auto status = cuMemPoolCreate(&cuda_state_->pool, &pool_props))
LOG(FATAL) << "Failed to create CUDA pool: " << cuda::ToStatus(status);
} else {
pool_size = reserve_memory_size;
if (auto status = cuDeviceGetDefaultMemPool(&cuda_state_->pool,
platform_device_id.value()))
LOG(FATAL) << "Failed to get default CUDA pool: " << cuda::ToStatus(status);
VLOG(2) << "using default memory pool " << cuda_state_->pool;
}
VLOG(1) << Name() << " CudaMallocAsync initialized on platform: "
<< platform_device_id.value() << " with pool size of: " << pool_size
<< " this ptr: " << this;
uint64_t release_threshold_64 = reserve_memory_size;
if (auto status = cuMemPoolSetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
&release_threshold_64))
LOG(FATAL) << "Failed to set CUDA pool attribute: " << cuda::ToStatus(status);
if (compute_stats) {
stats_ = std::make_unique<tsl::AllocatorStats>();
stats_->bytes_limit = static_cast<int64_t>(pool_size);
}
bool deterministic = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_DETERMINISTIC_ALLOCATOR",
false, &deterministic));
if (deterministic) {
int disable = 0;
if (auto status = cuMemPoolSetAttribute(
cuda_state_->pool, CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,
&disable)) {
LOG(FATAL) << "Failed to set CUDA pool attribute: " << cuda::ToStatus(status);
}
if (auto status = cuMemPoolSetAttribute(
cuda_state_->pool,
CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, &disable)) {
LOG(FATAL) << "Failed to set CUDA pool attribute: " << cuda::ToStatus(status);
}
}
static auto* all_pools_ = new std::vector<CUmemoryPool>();
static auto* all_ids_ = new std::vector<tsl::PlatformDeviceId>();
DCHECK(all_pools_->size() == all_ids_->size());
for (auto pool_item_ : *all_pools_) {
if (pool_item_ == cuda_state_->pool) {
VLOG(2) << Name()
<< " GpuCudaMallocAsyncAllocator pool already initialized. "
"PoolSize "
<< pool_size;
return;
}
}
for (int i = 0; i < all_pools_->size(); ++i) {
CUmemAccessDesc map;
map.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
map.location.id = (*all_ids_)[i].value();
map.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
VLOG(2) << "Setting access of the current pool to "
<< " location id: " << map.location.id;
int canAccessPeer;
if (auto status = cuDeviceCanAccessPeer(
&canAccessPeer, platform_device_id.value(), map.location.id)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed to know if GPU id "
<< map.location.id << " can access GPU id "
<< platform_device_id.value() << ": " << cuda::ToStatus(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess(cuda_state_->pool, &map, 1)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << i
<< " location id: " << map.location.id
<< " error: " << cuda::ToStatus(status);
}
}
map.location.id = platform_device_id.value();
int previous_pool_id = (*all_ids_)[i].value();
VLOG(2) << "Set access to the pool id: " << previous_pool_id
<< " location id: " << map.location.id;
if (auto status = cuDeviceCanAccessPeer(&canAccessPeer, previous_pool_id,
platform_device_id.value())) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed: " << cuda::ToStatus(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess((*all_pools_)[i], &map, 1)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << previous_pool_id
<< " location id: " << map.location.id
<< " error: " << cuda::ToStatus(status);
}
}
}
all_pools_->push_back(cuda_state_->pool);
all_ids_->push_back(platform_device_id);
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator PoolSize " << pool_size;
}
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, size_t release_threshold,
bool reserve_memory, bool compute_stats)
: GpuCudaMallocAsyncAllocator(platform_device_id, false, 0, reserve_memory,
release_threshold, false, compute_stats) {}
GpuCudaMallocAsyncAllocator::~GpuCudaMallocAsyncAllocator() {
if (create_new_pool_) {
VLOG(2) << "Delete memory pool "
<< reinterpret_cast<void*>(cuda_state_->pool);
if (auto status = cuMemPoolDestroy(cuda_state_->pool))
LOG(FATAL) << "Failed to destroy memory pool:" << cuda::ToStatus(status);
}
}
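// Allocates from the pool on the configured stream. On CUDA_ERROR_OUT_OF_MEMORY
// the stream is synchronized once and the allocation retried before giving up
// and logging the pool statistics.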
void* GpuCudaMallocAsyncAllocator::AllocateRaw(size_t alignment,
size_t num_bytes) {
CHECK(cuda_state_->cuda_stream != nullptr)
<< "A stream must be added to the GpuCudaMallocAsync allocator";
if (cuda_state_->pool == nullptr) {
LOG(FATAL)
<< "The instantiation of GpuCudaMallocAsyncAllocator failed."
<< " See previous errors.";
}
std::optional<absl::MutexLock> lock;
if (stats_) {
lock.emplace(&mutex_);
}
gpu::ScopedActivateContext scoped_activation{stream_exec_};
void* ptr = nullptr;
auto result =
cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr), num_bytes,
cuda_state_->pool, cuda_state_->cuda_stream);
if (result == CUDA_ERROR_OUT_OF_MEMORY) {
cuStreamSynchronize(cuda_state_->cuda_stream);
result =
cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr), num_bytes,
cuda_state_->pool, cuda_state_->cuda_stream);
}
if (result) {
size_t free, total;
cuMemGetInfo(&free, &total);
LOG(ERROR) << Name() << " cuMemAllocAsync failed to allocate " << num_bytes
<< " bytes: " << cuda::ToStatus(result)
<< "\n Reported by CUDA: Free memory/Total memory: " << free
<< "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
PrintAllocatorStatisticsNoLock();
}
return nullptr;
}
if (sync_mode_) {
cuStreamSynchronize(cuda_state_->cuda_stream);
}
if (stats_) {
++(stats_->num_allocs);
stats_->bytes_in_use += num_bytes;
if (stats_->bytes_in_use > stats_->peak_bytes_in_use) {
VLOG(9) << "New Peak memory usage of " << stats_->bytes_in_use
<< " bytes.";
}
stats_->peak_bytes_in_use =
std::max(stats_->peak_bytes_in_use, stats_->bytes_in_use);
stats_->largest_alloc_size =
std::max<std::size_t>(stats_->largest_alloc_size, num_bytes);
bool ptr_inserted = size_map_.emplace(ptr, num_bytes).second;
DCHECK(ptr_inserted);
}
VLOG(10) << Name() << " Allocated " << num_bytes << " at " << ptr;
return ptr;
}
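// Frees the pointer asynchronously on the same stream and updates statistics.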
void GpuCudaMallocAsyncAllocator::DeallocateRaw(void* ptr) {
if (ptr == nullptr) return;
std::optional<absl::MutexLock> lock;
if (stats_) {
lock.emplace(&mutex_);
}
if (auto result = cuMemFreeAsync(reinterpret_cast<const CUdeviceptr&>(ptr),
cuda_state_->cuda_stream)) {
if (result == CUDA_ERROR_DEINITIALIZED) {
VLOG(1) << "Ignoring CUDA error: " << cuda::ToStatus(result);
} else {
size_t free, total;
gpu::ScopedActivateContext scoped_activation{stream_exec_};
cuMemGetInfo(&free, &total);
LOG(ERROR) << "cudaFreeAsync failed to free " << ptr << ": "
<< cuda::ToStatus(result)
<< "\n Free memory/Total memory: " << free << "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
}
}
}
if (sync_mode_) {
cuStreamSynchronize(cuda_state_->cuda_stream);
}
if (stats_) {
DCHECK(size_map_.contains(ptr));
size_t size = size_map_[ptr];
stats_->bytes_in_use -= size;
size_map_.erase(ptr);
}
VLOG(10) << Name() << " Freed ptr: " << ptr;
}
bool GpuCudaMallocAsyncAllocator::TracksAllocationSizes() const {
return static_cast<bool>(stats_);
}
size_t GpuCudaMallocAsyncAllocator::RequestedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
absl::MutexLock l(&mutex_);
return size_map_.at(ptr);
}
size_t GpuCudaMallocAsyncAllocator::AllocatedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
absl::MutexLock l(&mutex_);
return size_map_.at(ptr);
}
std::optional<tsl::AllocatorStats> GpuCudaMallocAsyncAllocator::GetStats() {
if (!stats_) return std::nullopt;
absl::MutexLock l(&mutex_);
return *stats_;
}
bool GpuCudaMallocAsyncAllocator::ClearStats() {
if (!stats_) return false;
absl::MutexLock l(&mutex_);
stats_->num_allocs = 0;
stats_->peak_bytes_in_use = stats_->bytes_in_use;
stats_->largest_alloc_size = 0;
return true;
}
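// Binds the allocator to a CUDA stream and, when requested, allocates and
// immediately frees the release-threshold amount so the pool is reserved
// up front.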
void GpuCudaMallocAsyncAllocator::SetStreamAndPreallocateMemory(void* stream) {
auto new_cuda_stream = static_cast<CUstream>(stream);
if (cuda_state_->cuda_stream != nullptr &&
new_cuda_stream != cuda_state_->cuda_stream) {
LOG(FATAL) << "Trying to set the stream twice. This isn't supported. ";
}
uint64_t pool_size_64 = 0;
if (auto status = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
&pool_size_64)) {
LOG(FATAL) << "Failed to get CUDA pool attribute: " << cuda::ToStatus(status);
}
cuda_state_->cuda_stream = new_cuda_stream;
int64_t prealloc_size = 0;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar(
"TF_CUDA_MALLOC_ASYNC_SUPPORTED_PREALLOC", 0, &prealloc_size));
if (prealloc_size == -1) {
prealloc_size = pool_size_64;
} else if (reserve_memory_) {
prealloc_size = pool_size_64;
}
if (prealloc_size != 0) {
void* ptr = AllocateRaw(0, prealloc_size);
DeallocateRaw(ptr);
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator reserved the pool for "
<< prealloc_size << " bytes" << ". First ptr: " << ptr;
ClearStats();
}
}
} | #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
namespace {
static se::StreamExecutor* GpuExecutor() {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
}
namespace stream_executor {
TEST(GpuCudaMallocAsyncAllocator, TwoAllocatorsShareDefaultPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream1, executor->CreateStream());
auto allocator1 = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator1.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream1.get()));
TF_ASSERT_OK_AND_ASSIGN(auto stream2, executor->CreateStream());
auto allocator2 = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator2.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream2.get()));
void* addr1 = allocator1.AllocateRaw(128, 127);
void* addr2 = allocator2.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator1.DeallocateRaw(addr1);
allocator2.DeallocateRaw(addr2);
EXPECT_TRUE(stream1->ok());
EXPECT_TRUE(stream2->ok());
}
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedDefaultPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedNewPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
false,
false);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, SyncAddressAlignedNewPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59a59525-5523-4242-bd39-1cd2892685de | cpp | tensorflow/tensorflow | redzone_allocator | third_party/xla/xla/stream_executor/gpu/redzone_allocator.cc | third_party/xla/xla/stream_executor/gpu/redzone_allocator_test.cc | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/redzone_allocator_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/math/math_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
template <typename T>
static T RoundUpToNearest(T value, T divisor) {
return tsl::MathUtil::CeilOfRatio(value, divisor) * divisor;
}
constexpr int64_t kRhsRedzoneAlign = 4;
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
RedzoneAllocator::RedzoneAllocator(Stream* stream,
DeviceMemoryAllocator* memory_allocator,
const GpuAsmOpts& gpu_compilation_opts,
int64_t memory_limit, int64_t redzone_size,
uint8_t redzone_pattern)
: device_ordinal_(stream->parent()->device_ordinal()),
stream_(stream),
memory_limit_(memory_limit),
redzone_size_(RoundUpToNearest(
redzone_size,
static_cast<int64_t>(tsl::Allocator::kAllocatorAlignment))),
redzone_pattern_(redzone_pattern),
memory_allocator_(memory_allocator),
gpu_compilation_opts_(gpu_compilation_opts) {}
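// Allocates the user buffer surrounded by two redzones (plus slop so the
// right redzone stays 4-byte aligned) and fills both redzones with the
// repeating pattern byte.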
absl::StatusOr<DeviceMemory<uint8_t>> RedzoneAllocator::AllocateBytes(
int64_t byte_size) {
CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
int64_t rhs_slop = RoundUpToNearest(byte_size, kRhsRedzoneAlign) - byte_size;
TF_ASSIGN_OR_RETURN(
OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_,
byte_size + 2 * redzone_size_ + rhs_slop,
false));
allocated_bytes_excluding_redzones_ += byte_size;
static_assert(sizeof(uint8_t) == 1, "Unexpected size");
DeviceMemory<uint8_t> allocated_buffer_memory(*allocated_buffer);
DeviceMemory<uint8_t> lhs_redzone =
allocated_buffer_memory.GetSlice(0, redzone_size_);
DeviceMemory<uint8_t> data_chunk =
allocated_buffer_memory.GetSlice(redzone_size_, byte_size);
DeviceMemory<uint8_t> rhs_redzone_slop =
allocated_buffer_memory.GetSlice(redzone_size_ + byte_size, rhs_slop);
DeviceMemory<uint8_t> rhs_redzone_nonslop = allocated_buffer_memory.GetSlice(
redzone_size_ + byte_size + rhs_slop, redzone_size_);
uint8_t pattern_arr[] = {redzone_pattern_, redzone_pattern_, redzone_pattern_,
redzone_pattern_};
uint32_t pattern32;
std::memcpy(&pattern32, pattern_arr, sizeof(pattern32));
TF_RETURN_IF_ERROR(stream_->Memset32(&lhs_redzone, pattern32, redzone_size_));
if (rhs_slop != 0) {
TF_RETURN_IF_ERROR(
stream_->Memcpy(&rhs_redzone_slop, &pattern32, rhs_slop));
}
TF_RETURN_IF_ERROR(
stream_->Memset32(&rhs_redzone_nonslop, pattern32, redzone_size_));
allocated_buffers_.emplace_back(std::move(allocated_buffer), byte_size);
return data_chunk;
}
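// Host-side check: copies the redzone back to the host and scans for the
// first byte that differs from the expected pattern.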
static absl::StatusOr<RedzoneCheckStatus> CheckRedzoneHost(
DeviceMemoryBase redzone, DeviceMemoryBase user_allocation,
absl::string_view name, Stream* stream, uint8_t redzone_pattern) {
uint64_t size = redzone.size();
auto redzone_data = std::make_unique<uint8_t[]>(size);
TF_RETURN_IF_ERROR(stream->Memcpy(redzone_data.get(), redzone, size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
std::array<uint8_t, sizeof(uint64_t)> pattern_arr;
pattern_arr.fill(redzone_pattern);
uint64_t pattern64;
std::memcpy(&pattern64, pattern_arr.data(), sizeof(uint64_t));
int64_t i;
for (i = 0; i + 7 < size; i += sizeof(uint64_t)) {
uint64_t rz_value = *reinterpret_cast<uint64_t*>(&redzone_data[i]);
if (rz_value != pattern64) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i, pattern64,
rz_value);
}
}
for (; i < size; ++i) {
uint8_t rz_value = redzone_data[i];
if (rz_value != redzone_pattern) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i,
redzone_pattern, rz_value);
}
}
return RedzoneCheckStatus::OK();
}
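// Launches the device comparison kernel over one redzone, accumulating the
// number of mismatched bytes into out_param.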
static absl::Status RunRedzoneChecker(
Stream* stream, const DeviceMemory<uint8_t>& redzone,
uint8_t redzone_pattern, const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel) {
StreamExecutor* executor = stream->parent();
if (redzone.size() == 0) {
return absl::OkStatus();
}
int64_t num_elements = redzone.size();
int64_t threads_per_block = std::min(
executor->GetDeviceDescription().threads_per_block_limit(), num_elements);
int64_t block_count =
tsl::MathUtil::CeilOfRatio(num_elements, threads_per_block);
TF_RETURN_IF_ERROR(stream->ThenLaunch(
ThreadDim(threads_per_block), BlockDim(block_count), comparison_kernel,
redzone, redzone_pattern, redzone.size(), out_param));
return absl::OkStatus();
}
static absl::Status ReinitializeRedzone(Stream* stream,
DeviceMemoryBase redzone,
uint8_t redzone_pattern) {
absl::FixedArray<uint8_t> redzone_array(redzone.size());
redzone_array.fill(redzone_pattern);
TF_RETURN_IF_ERROR(
stream->Memcpy(&redzone, redzone_array.data(), redzone.size()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
return absl::OkStatus();
}
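// Checks both redzones of one allocation on the device; if the kernel reports
// a mismatch, re-checks on the host to locate the offending byte and then
// restores the redzone pattern.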
static absl::StatusOr<RedzoneCheckStatus> CheckRedzonesForBuffer(
Stream* stream, DeviceMemoryBase memory,
const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel, int64_t user_allocation_size,
uint64_t redzone_size, uint8_t redzone_pattern) {
int64_t rhs_slop =
RoundUpToNearest<int64_t>(user_allocation_size, kRhsRedzoneAlign) -
user_allocation_size;
CHECK_EQ(memory.size(), user_allocation_size + rhs_slop + 2 * redzone_size);
DeviceMemory<uint8_t> buffer_uint8(memory);
DeviceMemory<uint8_t> lhs_redzone =
buffer_uint8.GetSlice(0,
redzone_size);
DeviceMemory<uint8_t> user_allocation =
buffer_uint8.GetSlice(redzone_size,
user_allocation_size);
DeviceMemory<uint8_t> rhs_redzone =
buffer_uint8.GetSlice(redzone_size + user_allocation_size,
redzone_size + rhs_slop);
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, lhs_redzone, redzone_pattern,
out_param, comparison_kernel));
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, rhs_redzone, redzone_pattern,
out_param, comparison_kernel));
int64_t result;
CHECK_EQ(out_param.size(), sizeof(result));
TF_RETURN_IF_ERROR(stream->Memcpy(&result, out_param, sizeof(result)));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (result != 0) {
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus lhs_check,
CheckRedzoneHost(lhs_redzone, user_allocation, "LHS",
stream, redzone_pattern));
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus rhs_check,
CheckRedzoneHost(rhs_redzone, user_allocation, "RHS",
stream, redzone_pattern));
CHECK(!lhs_check.ok() || !rhs_check.ok())
<< "Mismatched results with host and device comparison";
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, lhs_redzone, redzone_pattern));
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, rhs_redzone, redzone_pattern));
return !lhs_check.ok() ? lhs_check : rhs_check;
}
return RedzoneCheckStatus::OK();
}
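// Checks every buffer allocated through this allocator and returns the first
// redzone violation found, if any.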
absl::StatusOr<RedzoneCheckStatus> RedzoneAllocator::CheckRedzones() const {
StreamExecutor* executor = stream_->parent();
TF_ASSIGN_OR_RETURN(
const ComparisonKernel* kernel,
GetComparisonKernel(stream_->parent(), gpu_compilation_opts_));
stream_executor::DeviceMemoryHandle out_param(
executor, executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(
stream_->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
for (const auto& buf_and_size : allocated_buffers_) {
TF_ASSIGN_OR_RETURN(
RedzoneCheckStatus redzone_status,
CheckRedzonesForBuffer(stream_, *buf_and_size.first,
DeviceMemory<uint64_t>(out_param.memory()),
*kernel, buf_and_size.second, redzone_size_,
redzone_pattern_));
if (!redzone_status.ok()) {
return redzone_status;
}
}
return RedzoneCheckStatus::OK();
}
std::string RedzoneCheckStatus::RedzoneFailureMsg() const {
return absl::StrFormat(
"Redzone mismatch in %s redzone of buffer %p at offset %d; "
"expected %08x but was %08x.",
buffer_name, user_buffer_address, offset, expected_value, actual_value);
}
} | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
static void EXPECT_REDZONE_OK(absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_TRUE(status.value().ok());
}
static void EXPECT_REDZONE_VIOLATION(
absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_FALSE(status.value().ok());
}
TEST(RedzoneAllocatorTest, WriteToRedzone) {
constexpr int64_t kRedzoneSize = 1 << 23;
constexpr uint8_t kRedzonePattern = 0x7e;
constexpr int64_t kAllocSize = (1 << 25) + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
(1LL << 32),
kRedzoneSize,
kRedzonePattern);
TF_ASSERT_OK_AND_ASSIGN(DeviceMemory<uint8_t> buf,
allocator.AllocateBytes(kAllocSize));
EXPECT_REDZONE_OK(allocator.CheckRedzones());
char* buf_addr = reinterpret_cast<char*>(buf.opaque());
DeviceMemoryBase lhs_redzone(buf_addr - kRedzoneSize, kRedzoneSize);
DeviceMemoryBase rhs_redzone(buf_addr + kAllocSize, kRedzoneSize);
auto check_redzone = [&](DeviceMemoryBase redzone, absl::string_view name) {
std::vector<uint8_t> host_buf(kRedzoneSize);
TF_ASSERT_OK(stream->Memcpy(host_buf.data(), redzone, kRedzoneSize));
TF_ASSERT_OK(stream->BlockHostUntilDone());
const int64_t kMaxMismatches = 16;
int64_t mismatches = 0;
for (int64_t i = 0; i < host_buf.size(); ++i) {
if (mismatches == kMaxMismatches) {
ADD_FAILURE() << "Hit max number of mismatches; skipping others.";
break;
}
if (host_buf[i] != kRedzonePattern) {
++mismatches;
EXPECT_EQ(host_buf[i], kRedzonePattern)
<< "at index " << i << " of " << name << " redzone";
}
}
};
check_redzone(lhs_redzone, "lhs");
check_redzone(rhs_redzone, "rhs");
auto modify_redzone = [&](DeviceMemoryBase redzone, int64_t offset,
absl::string_view name) {
SCOPED_TRACE(absl::StrCat(name, ", offset=", offset));
DeviceMemoryBase redzone_at_offset(
reinterpret_cast<char*>(redzone.opaque()) + offset, 1);
char old_redzone_value = 0;
{ EXPECT_REDZONE_OK(allocator.CheckRedzones()); }
TF_ASSERT_OK(stream->Memcpy(&old_redzone_value, redzone_at_offset, 1));
TF_ASSERT_OK(stream->MemZero(&redzone_at_offset, 1));
EXPECT_REDZONE_VIOLATION(allocator.CheckRedzones());
EXPECT_REDZONE_OK(allocator.CheckRedzones());
};
modify_redzone(lhs_redzone, 0, "lhs");
modify_redzone(lhs_redzone, kRedzoneSize - 1, "lhs");
modify_redzone(rhs_redzone, 0, "rhs");
modify_redzone(rhs_redzone, kRedzoneSize - 1, "rhs");
}
TEST(RedzoneAllocatorTest, VeryLargeRedzone) {
constexpr int64_t kRedzoneSize = 65535 * 1024 + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
(1LL << 32),
kRedzoneSize,
-1);
(void)allocator.AllocateBytes(1);
EXPECT_REDZONE_OK(allocator.CheckRedzones());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/redzone_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/redzone_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85cb7bf6-0fd4-4357-9ecf-51f39358593e | cpp | tensorflow/tensorflow | scoped_activate_context | third_party/xla/xla/stream_executor/gpu/scoped_activate_context.cc | third_party/xla/xla/stream_executor/gpu/scoped_activate_context_test.cc | #include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "absl/log/check.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
namespace stream_executor::gpu {
namespace {
thread_local struct ThreadLocalData {
Context* context;
int device_ordinal;
int depth;
} tls_data = {};
}
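// Context activation is tracked per thread: the outermost scope activates the
// requested context, nested scopes only switch (and later restore) when they
// target a different device.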
ScopedActivateContext::ScopedActivateContext(GpuExecutor* gpu_executor)
: ScopedActivateContext(gpu_executor->gpu_context()) {}
ScopedActivateContext::ScopedActivateContext(StreamExecutor* executor)
: ScopedActivateContext(ExtractGpuExecutor(executor)) {}
ScopedActivateContext::ScopedActivateContext(gpu::Context* gpu_context) {
auto* tls = &tls_data;
if (tls->depth == 0) {
VLOG(3) << "ScopedActivateContext switching to "
<< gpu_context->device_ordinal();
gpu_context->SetActive();
tls->depth = 1;
tls->device_ordinal = gpu_context->device_ordinal();
tls->context = gpu_context;
to_restore_ = nullptr;
return;
}
tls->depth++;
if (tls->device_ordinal == gpu_context->device_ordinal()) {
DCHECK(gpu_context->IsActive());
return;
}
VLOG(3) << "ScopedActivateContext switching context from "
<< tls->device_ordinal << " to " << gpu_context->device_ordinal();
to_restore_ = tls->context;
gpu_context->SetActive();
tls->device_ordinal = gpu_context->device_ordinal();
tls->context = gpu_context;
}
ScopedActivateContext::~ScopedActivateContext() {
auto* tls = &tls_data;
tls->depth--;
DCHECK_GE(tls->depth, 0);
if (to_restore_ == nullptr) {
return;
}
to_restore_->SetActive();
tls->device_ordinal = to_restore_->device_ordinal();
tls->context = to_restore_;
}
} | #include "xla/stream_executor/gpu/scoped_activate_context.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/stream_executor/gpu/mock_context.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
using testing::Return;
namespace stream_executor::gpu {
namespace {
TEST(ScopedActivateContextTest, SetsActiveOnceForSameContextWorks) {
MockContext context;
EXPECT_CALL(context, SetActive).Times(1);
EXPECT_CALL(context, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context, IsActive).WillRepeatedly(Return(true));
{
ScopedActivateContext scoped_activate_context1(&context);
{ ScopedActivateContext scoped_activate_context2(&context); }
}
}
TEST(ScopedActivateContextTest, TwoDifferentContextsWorks) {
MockContext context1;
EXPECT_CALL(context1, SetActive).Times(2);
EXPECT_CALL(context1, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context1, IsActive).WillRepeatedly(Return(true));
MockContext context2;
EXPECT_CALL(context2, SetActive).Times(1);
EXPECT_CALL(context2, device_ordinal).WillRepeatedly(Return(2));
EXPECT_CALL(context2, IsActive).WillRepeatedly(Return(true));
{
ScopedActivateContext scoped_activate_context1(&context1);
{ ScopedActivateContext scoped_activate_context2(&context2); }
}
}
TEST(ScopedActivateContextTest, TwoThreadsBothSetActiveButDontRestore) {
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "test", 2);
thread_pool.Schedule([&]() {
MockContext context1;
EXPECT_CALL(context1, SetActive).Times(1);
EXPECT_CALL(context1, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context1, IsActive).Times(0);
ScopedActivateContext scoped_activate_context1(&context1);
});
thread_pool.Schedule([&]() {
MockContext context2;
EXPECT_CALL(context2, SetActive).Times(1);
EXPECT_CALL(context2, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context2, IsActive).Times(0);
ScopedActivateContext scoped_activate_context2(&context2);
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/scoped_activate_context.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/scoped_activate_context_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d22aa2e-b468-4a33-978f-ded15f042652 | cpp | tensorflow/tensorflow | gpu_command_buffer | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer.cc | third_party/xla/xla/stream_executor/gpu/gpu_command_buffer_test.cc | #include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace stream_executor::gpu {
absl::StatusOr<MultiKernelLoaderSpec> GetSetIfConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetIfElseConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetCaseConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetForConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetSetWhileConditionKernelLoaderSpec();
absl::StatusOr<MultiKernelLoaderSpec> GetNoOpKernelLoaderSpec();
using Mode = CommandBuffer::Mode;
using State = CommandBuffer::State;
std::string_view to_string(State state) {
switch (state) {
case State::kCreate:
return "create";
case State::kUpdate:
return "update";
case State::kFinalized:
return "finalized";
}
}
absl::Status UnsupportedStateError(State state) {
return absl::InternalError(
absl::StrCat("Unsupported command buffer state: ", to_string(state)));
}
static std::atomic<int64_t> allocated_execs(0);
static std::atomic<int64_t> alive_execs(0);
static int64_t NotifyExecCreated() {
alive_execs.fetch_add(1, std::memory_order_relaxed);
return allocated_execs.fetch_add(1, std::memory_order_relaxed);
}
static int64_t NotifyExecDestroyed() {
DCHECK_GE(alive_execs.load(std::memory_order_relaxed), 1);
return alive_execs.fetch_sub(1, std::memory_order_relaxed) - 1;
}
int64_t GpuCommandBuffer::AliveExecs() {
return alive_execs.load(std::memory_order_relaxed);
}
static std::string_view ModeToString(CommandBuffer::Mode mode) {
switch (mode) {
case CommandBuffer::Mode::kPrimary:
return "primary";
case CommandBuffer::Mode::kNested:
return "nested";
}
}
GpuCommandBuffer::GpuCommandBuffer(Mode mode, GpuExecutor* parent,
GpuGraphHandle graph, bool is_owned_graph)
: mode_(mode),
parent_(parent),
graph_(graph),
is_owned_graph_(is_owned_graph) {
VLOG(5) << "Created command buffer for graph " << graph_
<< "; mode=" << ModeToString(mode)
<< "; is_owned_graph=" << is_owned_graph_;
execution_scopes_.try_emplace(kDefaulExecutionScope);
}
GpuCommandBuffer::~GpuCommandBuffer() {
if (exec_ != nullptr && is_owned_graph_exec_) {
VLOG(5) << "Destroy GPU command buffer executable graph " << exec_ << " "
<< "(remaining alive executable graphs: " << NotifyExecDestroyed()
<< ")";
if (auto status = GpuDriver::DestroyGraphExec(exec_); !status.ok()) {
LOG(ERROR) << "Failed to destroy GPU graph exec: " << status.message();
}
}
if (graph_ != nullptr && is_owned_graph_) {
if (auto status = GpuDriver::DestroyGraph(graph_); !status.ok()) {
LOG(ERROR) << "Failed to destroy GPU graph: " << status.message();
}
}
}
GpuCommandBuffer::ScopedGpuGraphExec::ScopedGpuGraphExec(
GpuCommandBuffer* cmd_buffer, GpuGraphExecHandle exec)
: cmd_buffer(cmd_buffer),
restore(cmd_buffer->exec_),
restore_is_owned(cmd_buffer->is_owned_graph_exec_) {
cmd_buffer->exec_ = exec;
cmd_buffer->is_owned_graph_exec_ = false;
}
GpuCommandBuffer::ScopedGpuGraphExec::~ScopedGpuGraphExec() {
cmd_buffer->exec_ = restore;
cmd_buffer->is_owned_graph_exec_ = restore_is_owned;
}
static GpuDevicePtr AsDevicePtr(const DeviceMemoryBase& mem) {
return reinterpret_cast<GpuDevicePtr>(const_cast<void*>(mem.opaque()));
}
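// Records the operations launched on `stream` by `function` into this command
// buffer's graph via stream capture. On CUDA the capture is written directly
// into graph_; on ROCm a fresh graph is captured and swapped in.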
absl::Status GpuCommandBuffer::Trace(
Stream* stream, absl::AnyInvocable<absl::Status()> function) {
TF_RETURN_IF_ERROR(CheckNotFinalized());
#if defined(TENSORFLOW_USE_ROCM)
TF_ASSIGN_OR_RETURN(size_t count, GpuDriver::GraphGetNodeCount(graph_));
if (count != 0 || !is_owned_graph_)
return absl::InternalError(
"Stream can't be traced on non empty command buffer");
#endif
VLOG(5) << "Trace into GPU command buffer graph " << graph_
<< " on a stream: " << stream;
auto gpu_stream = AsGpuStreamValue(stream);
uint64_t start_nanos = tsl::Env::Default()->NowNanos();
#if !defined(TENSORFLOW_USE_ROCM)
TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCaptureToGraph(
gpu_stream, graph_, GpuDriver::StreamCaptureMode::kThreadLocal));
#else
TF_RETURN_IF_ERROR(GpuDriver::StreamBeginCapture(
gpu_stream, GpuDriver::StreamCaptureMode::kThreadLocal));
#endif
auto traced = function();
GpuGraphHandle captured_graph;
TF_RETURN_IF_ERROR(GpuDriver::StreamEndCapture(gpu_stream, &captured_graph));
#if !defined(TENSORFLOW_USE_ROCM)
DCHECK(captured_graph == graph_) << "Stream capture should update graph_";
#else
TF_RETURN_IF_ERROR(
GpuDriver::DestroyGraph(std::exchange(graph_, captured_graph)));
#endif
uint64_t end_nanos = tsl::Env::Default()->NowNanos();
if (!traced.ok())
return absl::InternalError(
absl::StrCat("Failed to capture gpu graph: ", traced.message()));
VLOG(5) << "Traced into the GPU command buffer graph " << graph_ << " (took "
<< (end_nanos - start_nanos) / 1000 << " μs)";
return absl::OkStatus();
}
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
return execution_scope.barriers.empty()
? Dependencies{}
: Dependencies{execution_scope.barriers.back().handle};
}
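// The condition-setting kernels below are loaded lazily on first use and
// cached for the lifetime of the command buffer.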
absl::StatusOr<GpuCommandBuffer::SetIfConditionKernel*>
GpuCommandBuffer::GetSetIfConditionKernel() {
if (!set_if_condition_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetSetIfConditionKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(
set_if_condition_kernel_,
SetIfConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_if_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetIfElseConditionKernel*>
GpuCommandBuffer::GetSetIfElseConditionKernel() {
if (!set_if_else_condition_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetSetIfElseConditionKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(
set_if_else_condition_kernel_,
SetIfElseConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_if_else_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetCaseConditionKernel*>
GpuCommandBuffer::GetSetCaseConditionKernel() {
if (!set_case_condition_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetSetCaseConditionKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(
set_case_condition_kernel_,
SetCaseConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_case_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetForConditionKernel*>
GpuCommandBuffer::GetSetForConditionKernel() {
if (!set_for_condition_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetSetForConditionKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(
set_for_condition_kernel_,
SetForConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_for_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::SetWhileConditionKernel*>
GpuCommandBuffer::GetSetWhileConditionKernel() {
if (!set_while_condition_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetSetWhileConditionKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(
set_while_condition_kernel_,
SetWhileConditionKernel::FactoryType::Create(parent_, spec));
}
return &set_while_condition_kernel_;
}
absl::StatusOr<GpuCommandBuffer::NoOpKernel*>
GpuCommandBuffer::GetNoOpKernel() {
if (!noop_kernel_) {
TF_ASSIGN_OR_RETURN(auto spec, GetNoOpKernelLoaderSpec());
TF_ASSIGN_OR_RETURN(noop_kernel_,
NoOpKernel::FactoryType::Create(parent_, spec));
}
return &noop_kernel_;
}
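// Disables barrier nodes in the instantiated executable graph so they are not
// launched at run time, recursing into nested conditional command buffers.
// This is a no-op on ROCm.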
absl::Status GpuCommandBuffer::DisableBarriersExecution(
GpuGraphExecHandle exec) {
#if !defined(TENSORFLOW_USE_ROCM)
ExecutionScope& execution_scope = execution_scopes_[kDefaulExecutionScope];
for (GpuGraphBarrierInfo& barrier : execution_scope.barriers) {
if (barrier.is_barrier_node) {
TF_RETURN_IF_ERROR(
GpuDriver::GraphNodeSetEnabled(exec, barrier.handle, false));
}
}
for (ConditionalCommandBuffers& cmd_buffers :
execution_scope.conditional_command_buffers) {
for (auto& cmd_buffer : cmd_buffers.command_buffers) {
TF_RETURN_IF_ERROR(cmd_buffer->DisableBarriersExecution(exec));
}
}
#endif
return absl::OkStatus();
}
absl::Status GpuCommandBuffer::CheckNotFinalized() {
if (state_ == State::kFinalized)
return absl::InternalError(
"Command can't be added to a command buffer after it was finalized");
return absl::OkStatus();
}
absl::Status GpuCommandBuffer::CheckNumCommandBuffers(
const ConditionalCommandBuffers& cmd_buffers, size_t num_cmd_buffers) {
if (cmd_buffers.handles.size() != num_cmd_buffers) {
return absl::InternalError(absl::StrCat(
"Expected to have ", num_cmd_buffers,
" conditional command buffers, got ", cmd_buffers.handles.size()));
}
return absl::OkStatus();
}
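// Creates a node joining `dependencies` that can act as an execution barrier.
// On CUDA versions before 12.4 this is a no-op kernel node; otherwise an
// empty graph node is used.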
absl::StatusOr<GpuGraphNodeHandle> GpuCommandBuffer::CreateBarrierNode(
const Dependencies& dependencies) {
GpuGraphNodeHandle barrier_handle = nullptr;
#if !defined(TENSORFLOW_USE_ROCM) && CUDA_VERSION < 12040
TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
TF_RETURN_IF_ERROR(GpuDriver::GraphAddKernelNode(
&barrier_handle, graph_, dependencies, "noop",
AsGpuKernel(&**noop)->gpu_function(), 1, 1, 1, 1, 1, 1, 0,
nullptr, nullptr));
#else
TF_RETURN_IF_ERROR(
GpuDriver::GraphAddEmptyNode(&barrier_handle, graph_, dependencies));
#endif
return barrier_handle;
}
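// Collects the nodes added to the execution scope since its last barrier;
// they become the dependencies of the next barrier.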
GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrierDependencies(
ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
auto& barriers = execution_scope.barriers;
Dependencies dependencies;
for (size_t i = barriers.empty() ? 0 : barriers.back().nodes_offset;
i < execution_scope.nodes.size(); ++i) {
dependencies.push_back(execution_scope.nodes[i].handle);
}
return dependencies;
}
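// Adds a barrier to a single execution scope. If nothing was added since the
// previous barrier the previous barrier is reused, a single new node serves
// as its own barrier, and otherwise a dedicated barrier node is created.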
absl::Status GpuCommandBuffer::Barrier(ExecutionScopeId execution_scope_id) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
if (state_ == State::kCreate) {
size_t nodes_offset = execution_scope.nodes.size();
Dependencies dependencies = GetBarrierDependencies(execution_scope_id);
if (dependencies.empty() && !execution_scope.barriers.empty()) {
execution_scope.barriers.push_back({execution_scope.barriers.back()});
return absl::OkStatus();
}
if (dependencies.size() == 1) {
execution_scope.barriers.push_back(
{execution_scope.nodes.back().handle, false, nodes_offset});
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
return absl::OkStatus();
}
if (state_ == State::kUpdate) {
if (execution_scope.update_state.barrier_idx++ >=
execution_scope.barriers.size()) {
return absl::InternalError(
absl::StrFormat("Execution scope %d barrier index out of range",
execution_scope_id.value()));
}
return absl::OkStatus();
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::Barrier(
absl::Span<const ExecutionScopeId> execution_scope_ids) {
if (execution_scope_ids.empty()) return absl::OkStatus();
if (execution_scope_ids.size() == 1) {
return Barrier(execution_scope_ids[0]);
}
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
}
if (state_ == State::kCreate) {
Dependencies dependencies;
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
dependencies.push_back(execution_scope.barriers.back().handle);
}
TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
size_t nodes_offset = execution_scope.nodes.size();
execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
}
return absl::OkStatus();
}
if (state_ == State::kUpdate) {
for (ExecutionScopeId execution_scope_id : execution_scope_ids) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
if (execution_scope.update_state.barrier_idx++ >=
execution_scope.barriers.size()) {
return absl::InternalError(
absl::StrFormat("Execution scope %d barrier index out of range",
execution_scope_id.value()));
}
}
return absl::OkStatus();
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::Barrier(ExecutionScopeId from_execution_scope_id,
ExecutionScopeId to_execution_scope_id) {
if (from_execution_scope_id == to_execution_scope_id) {
return Barrier(from_execution_scope_id);
}
TF_RETURN_IF_ERROR(Barrier(from_execution_scope_id));
TF_RETURN_IF_ERROR(Barrier(to_execution_scope_id));
if (state_ == State::kCreate) {
Dependencies dependencies = {
execution_scopes_[from_execution_scope_id].barriers.back().handle,
execution_scopes_[to_execution_scope_id].barriers.back().handle};
TF_ASSIGN_OR_RETURN(auto barrier_handle, CreateBarrierNode(dependencies));
ExecutionScope& execution_scope = execution_scopes_[to_execution_scope_id];
size_t nodes_offset = execution_scope.nodes.size();
execution_scope.barriers.push_back({barrier_handle, true, nodes_offset});
return absl::OkStatus();
}
if (state_ == State::kUpdate) {
ExecutionScope& execution_scope = execution_scopes_[to_execution_scope_id];
if (execution_scope.update_state.barrier_idx++ >=
execution_scope.barriers.size()) {
return absl::InternalError(
absl::StrFormat("Execution scope %d barrier index out of range",
to_execution_scope_id.value()));
}
return absl::OkStatus();
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::LaunchWithPackedArgs(
ExecutionScopeId execution_scope_id, const ThreadDim& threads,
const BlockDim& blocks, const Kernel& kernel,
const KernelArgsPackedArrayBase& packed_args) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
CHECK_EQ(kernel.Arity() + (packed_args.number_of_shared_bytes() > 0),
packed_args.number_of_arguments());
const GpuKernel* gpu_kernel = AsGpuKernel(&kernel);
GpuFunctionHandle gpu_func = gpu_kernel->gpu_function();
void** kernel_params =
const_cast<void**>(packed_args.argument_addresses().data());
if (state_ == State::kCreate) {
Dependencies barrier = GetBarrier(execution_scope_id);
GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
return GpuDriver::GraphAddKernelNode(
&node_info.handle, graph_, barrier, kernel.name(), gpu_func, blocks.x,
blocks.y, blocks.z, threads.x, threads.y, threads.z,
packed_args.number_of_shared_bytes(), kernel_params, nullptr);
}
if (state_ == State::kUpdate) {
GpuGraphNodeHandle node =
execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
return GpuDriver::GraphExecKernelNodeSetParams(
exec_, node, kernel.name(), gpu_func, blocks.x, blocks.y, blocks.z,
threads.x, threads.y, threads.z, packed_args.number_of_shared_bytes(),
kernel_params, nullptr);
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::Launch(ExecutionScopeId execution_scope_id,
const ThreadDim& threads,
const BlockDim& blocks,
const Kernel& kernel,
const KernelArgs& args) {
TF_RETURN_IF_ERROR(CheckNotFinalized());
if (auto* packed = DynCast<KernelArgsPackedArrayBase>(&args)) {
return LaunchWithPackedArgs(execution_scope_id, threads, blocks, kernel,
*packed);
}
if (auto* device_mem = DynCast<KernelArgsDeviceMemoryArray>(&args)) {
auto& pack = kernel.args_packing();
if (!pack) {
return absl::InternalError(
"Kernel is missing a custom arguments packing function for device "
"memory arguments array");
}
TF_ASSIGN_OR_RETURN(auto packed, pack(kernel, *device_mem));
return LaunchWithPackedArgs(execution_scope_id, threads, blocks, kernel,
*packed);
}
return absl::InternalError("Unsupported kernel arguments type");
}
absl::Status GpuCommandBuffer::AddNestedCommandBuffer(
ExecutionScopeId execution_scope_id, const CommandBuffer& nested) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
TF_RETURN_IF_ERROR(CheckNotFinalized());
GpuGraphHandle child_graph = GpuCommandBuffer::Cast(&nested)->graph();
if (state_ == State::kCreate) {
Dependencies barrier = GetBarrier(execution_scope_id);
GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
return GpuDriver::GraphAddChildNode(&node_info.handle, graph_, barrier,
child_graph);
}
if (state_ == State::kUpdate) {
GpuGraphNodeHandle node =
execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
return GpuDriver::GraphExecChildNodeSetParams(exec_, node, child_graph);
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::MemcpyDeviceToDevice(
ExecutionScopeId execution_scope_id, DeviceMemoryBase* dst,
const DeviceMemoryBase& src, uint64_t size) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
TF_RETURN_IF_ERROR(CheckNotFinalized());
if (state_ == State::kCreate) {
Dependencies barrier = GetBarrier(execution_scope_id);
GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
return GpuDriver::GraphAddMemcpyD2DNode(
parent_->gpu_context(), &node_info.handle, graph_, barrier,
AsDevicePtr(*dst), AsDevicePtr(src), size);
}
if (state_ == State::kUpdate) {
GpuGraphNodeHandle node =
execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
return GpuDriver::GraphExecMemcpyD2DNodeSetParams(
parent_->gpu_context(), exec_, node, AsDevicePtr(*dst),
AsDevicePtr(src), size);
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::Memset(ExecutionScopeId execution_scope_id,
DeviceMemoryBase* dst,
CommandBuffer::BitPattern bit_pattern,
size_t num_elements) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
TF_RETURN_IF_ERROR(CheckNotFinalized());
if (state_ == State::kCreate) {
Dependencies barrier = GetBarrier(execution_scope_id);
GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
return GpuDriver::GraphAddMemsetNode(
parent_->gpu_context(), &node_info.handle, graph_, barrier,
AsDevicePtr(*dst), bit_pattern, num_elements);
}
if (state_ == State::kUpdate) {
GpuGraphNodeHandle node =
execution_scope.nodes[execution_scope.update_state.node_idx++].handle;
return GpuDriver::GraphExecMemsetNodeSetParams(
parent_->gpu_context(), exec_, node, AsDevicePtr(*dst), bit_pattern,
num_elements);
}
return UnsupportedStateError(state_);
}
using ConditionalHandles = absl::Span<const GpuGraphConditionalHandle>;
GpuCommandBuffer::ConditionBuilder
GpuCommandBuffer::ToConditionBuilder(Builder builder) {
return [builder = std::move(builder)](CommandBuffer* cmd_buffer,
GpuGraphConditionalHandle) {
return builder(cmd_buffer);
};
}
absl::StatusOr<std::vector<GpuGraphConditionalHandle>>
GpuCommandBuffer::CreateConditionalHandles(size_t num_handles) {
std::vector<GpuGraphConditionalHandle> handles;
for (size_t i = 0; i < num_handles; ++i) {
TF_RETURN_IF_ERROR(GpuDriver::GraphConditionalHandleCreate(
&handles.emplace_back(), graph_, parent_->gpu_context(), 0, 0));
}
return handles;
}
absl::StatusOr<std::vector<std::unique_ptr<GpuCommandBuffer>>>
GpuCommandBuffer::CreateConditionalCommandBuffers(
absl::Span<const GpuGraphConditionalHandle> handles,
absl::Span<const GpuGraphHandle> graphs,
absl::Span<const ConditionBuilder> builders) {
std::vector<std::unique_ptr<GpuCommandBuffer>> cmd_buffers;
CommandBuffer::Mode nested = CommandBuffer::Mode::kNested;
bool is_owned_graph = false;
for (size_t i = 0; i < handles.size(); ++i) {
auto command_buffer = std::make_unique<GpuCommandBuffer>(
nested, parent_, graphs[i], is_owned_graph);
TF_RETURN_IF_ERROR(builders[i](command_buffer.get(), handles[i]));
TF_RETURN_IF_ERROR(command_buffer->Finalize());
cmd_buffers.push_back(std::move(command_buffer));
}
return cmd_buffers;
}
absl::Status GpuCommandBuffer::UpdateConditionalCommandBuffers(
absl::Span<const GpuGraphConditionalHandle> handles,
absl::Span<const std::unique_ptr<GpuCommandBuffer>> command_buffers,
absl::Span<const ConditionBuilder> builders) {
for (size_t i = 0; i < command_buffers.size(); ++i) {
ScopedGpuGraphExec scoped_exec(command_buffers[i].get(), exec_);
TF_RETURN_IF_ERROR(command_buffers[i]->Update());
TF_RETURN_IF_ERROR(builders[i](command_buffers[i].get(), handles[i]));
TF_RETURN_IF_ERROR(command_buffers[i]->Finalize());
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<GpuGraphHandle>>
GpuCommandBuffer::CreateConditionalNodes(
ExecutionScopeId execution_scope_id, ConditionType type,
absl::Span<const GpuGraphConditionalHandle> handles) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
std::vector<GpuGraphHandle> conditional_graphs;
using ConditionalParams = GpuDriver::GpuGraphConditionalNodeParams;
using ConditionalResult = GpuDriver::GpuGraphConditionalNodeParams::Result;
for (GpuGraphConditionalHandle handle : handles) {
Dependencies barrier = GetBarrier(execution_scope_id);
GpuGraphNodeInfo& node_info = execution_scope.nodes.emplace_back();
ConditionalParams params;
params.type = type;
params.handle = handle;
params.context = parent_->gpu_context();
TF_ASSIGN_OR_RETURN(
GpuDriver::GpuGraphNodeResult result,
GpuDriver::GraphAddNode(&node_info.handle, graph_, barrier, params));
conditional_graphs.push_back(std::get<ConditionalResult>(result).graph);
}
return conditional_graphs;
}
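// Shared implementation behind If/IfElse/Case/For/While: creates conditional
// handles, records the kernel that sets their values, adds the conditional
// graph nodes, and builds (or, on update, re-records) the nested command
// buffers for each branch.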
absl::Status GpuCommandBuffer::CreateConditionalCommand(
ExecutionScopeId execution_scope_id, ConditionType type,
SetConditionFn set_condition, absl::Span<const ConditionBuilder> builders) {
ExecutionScope& execution_scope = execution_scopes_[execution_scope_id];
TF_RETURN_IF_ERROR(CheckNotFinalized());
size_t num_handles = builders.size();
if (state_ == State::kCreate) {
TF_ASSIGN_OR_RETURN(auto handles, CreateConditionalHandles(num_handles));
TF_RETURN_IF_ERROR(set_condition(execution_scope_id, handles));
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
TF_ASSIGN_OR_RETURN(
auto graphs, CreateConditionalNodes(execution_scope_id, type, handles));
TF_ASSIGN_OR_RETURN(auto cmd_buffers, CreateConditionalCommandBuffers(
handles, graphs, builders));
execution_scope.conditional_command_buffers.push_back(
{std::move(handles), std::move(cmd_buffers)});
return absl::OkStatus();
}
if (state_ == State::kUpdate) {
ConditionalCommandBuffers& cond_cmd_buffers =
execution_scope.conditional_command_buffers[execution_scope.update_state
.conditional_idx++];
TF_RETURN_IF_ERROR(CheckNumCommandBuffers(cond_cmd_buffers, num_handles));
TF_RETURN_IF_ERROR(
set_condition(execution_scope_id, cond_cmd_buffers.handles));
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
execution_scope.update_state.node_idx += num_handles;
return UpdateConditionalCommandBuffers(
cond_cmd_buffers.handles,
absl::MakeSpan(cond_cmd_buffers.command_buffers), builders);
}
return UnsupportedStateError(state_);
}
absl::Status GpuCommandBuffer::If(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> predicate,
Builder then_builder) {
TF_ASSIGN_OR_RETURN(SetIfConditionKernel * set_if_condition,
GetSetIfConditionKernel());
auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
return CommandBuffer::Launch(*set_if_condition, id, ThreadDim(), BlockDim(),
handles[0], predicate);
};
std::array<ConditionBuilder, 1> builders = {
ToConditionBuilder(std::move(then_builder))};
return CreateConditionalCommand(execution_scope_id, ConditionType::kIf,
set_cond_fn, builders);
}
absl::Status GpuCommandBuffer::IfElse(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> predicate,
Builder then_builder,
Builder else_builder) {
TF_ASSIGN_OR_RETURN(SetIfElseConditionKernel * set_if_else_condition,
GetSetIfElseConditionKernel());
auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
return CommandBuffer::Launch(*set_if_else_condition, id, ThreadDim(),
BlockDim(), handles[0], handles[1], predicate);
};
std::array<ConditionBuilder, 2> builders = {
ToConditionBuilder(std::move(then_builder)),
ToConditionBuilder(std::move(else_builder))};
return CreateConditionalCommand(execution_scope_id, ConditionType::kIf,
set_cond_fn, builders);
}
absl::Status GpuCommandBuffer::Case(ExecutionScopeId execution_scope_id,
DeviceMemory<int32_t> index,
std::vector<Builder> branches) {
TF_ASSIGN_OR_RETURN(SetCaseConditionKernel * set_case_condition,
GetSetCaseConditionKernel());
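  // The set-case-condition kernel takes a fixed number of handles, so branches
  // are handled in batches of kBranchBatchSize with unused handle slots padded
  // out; the conditional default branch is only enabled for the last batch.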
constexpr size_t kBranchBatchSize = 8;
int32_t batch_offset = 0;
while (batch_offset < branches.size()) {
int32_t remaining_branches = branches.size() - batch_offset;
int32_t batch_size;
bool enable_conditional_default;
if (remaining_branches <= kBranchBatchSize) {
batch_size = remaining_branches;
enable_conditional_default = true;
} else {
batch_size = kBranchBatchSize;
enable_conditional_default = false;
}
auto set_cond_fn = [&, batch_offset, enable_conditional_default](
ExecutionScopeId id, ConditionalHandles handles) {
int32_t num_handles = handles.size();
std::vector<GpuGraphConditionalHandle> padded_handles(handles.begin(),
handles.end());
padded_handles.resize(kBranchBatchSize);
return CommandBuffer::Launch(
*set_case_condition, id, ThreadDim(), BlockDim(), padded_handles[0],
padded_handles[1], padded_handles[2], padded_handles[3],
padded_handles[4], padded_handles[5], padded_handles[6],
padded_handles[7], index, batch_offset, num_handles,
enable_conditional_default);
};
absl::InlinedVector<ConditionBuilder, kBranchBatchSize> builders;
builders.reserve(batch_size);
for (int z = 0; z < batch_size; ++z) {
int branch_offset = z + batch_offset;
builders.push_back(
ToConditionBuilder(std::move(branches[branch_offset])));
}
TF_RETURN_IF_ERROR(CreateConditionalCommand(
execution_scope_id, ConditionType::kIf, set_cond_fn, builders));
batch_offset += batch_size;
}
return absl::OkStatus();
}
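// Implements a counted loop as a conditional WHILE node: the loop counter is
// zeroed, the condition kernel compares it to `num_iteration`, and the body
// re-launches the condition kernel after every iteration.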
absl::Status GpuCommandBuffer::For(ExecutionScopeId execution_scope_id,
int32_t num_iteration,
DeviceMemory<int32_t> loop_counter,
Builder body_builder) {
TF_ASSIGN_OR_RETURN(SetForConditionKernel * set_for_condition,
GetSetForConditionKernel());
TF_RETURN_IF_ERROR(Memset(execution_scope_id, &loop_counter, uint32_t{0}, 1));
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
return CommandBuffer::Launch(*set_for_condition, id, ThreadDim(),
BlockDim(), handles[0], loop_counter,
num_iteration);
};
auto body = [&](CommandBuffer* body, GpuGraphConditionalHandle handle) {
TF_RETURN_IF_ERROR(body_builder(body));
TF_RETURN_IF_ERROR(body->Barrier());
return body->Launch(*set_for_condition, ThreadDim(), BlockDim(), handle,
loop_counter, num_iteration);
};
std::array<ConditionBuilder, 1> builders = {std::move(body)};
return CreateConditionalCommand(execution_scope_id, ConditionType::kWhile,
set_cond_fn, builders);
}
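// Implements a while loop: the condition is recorded once before the loop to
// initialize the conditional handle and again at the end of the body so the
// predicate is re-evaluated on every iteration.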
absl::Status GpuCommandBuffer::While(ExecutionScopeId execution_scope_id,
DeviceMemory<bool> pred,
ExecutionScopeBuilder cond_builder,
Builder body_builder) {
TF_ASSIGN_OR_RETURN(SetWhileConditionKernel * set_while_condition,
GetSetWhileConditionKernel());
TF_RETURN_IF_ERROR(cond_builder(execution_scope_id, this));
TF_RETURN_IF_ERROR(Barrier(execution_scope_id));
auto set_cond_fn = [&](ExecutionScopeId id, ConditionalHandles handles) {
return CommandBuffer::Launch(*set_while_condition, id, ThreadDim(),
BlockDim(), handles[0], pred);
};
auto body = [&](CommandBuffer* body, GpuGraphConditionalHandle handle) {
TF_RETURN_IF_ERROR(body_builder(body));
TF_RETURN_IF_ERROR(body->Barrier());
TF_RETURN_IF_ERROR(cond_builder(kDefaulExecutionScope, body));
TF_RETURN_IF_ERROR(body->Barrier());
return body->Launch(*set_while_condition, ThreadDim(), BlockDim(), handle,
pred);
};
std::array<ConditionBuilder, 1> builders = {std::move(body)};
return CreateConditionalCommand(execution_scope_id, ConditionType::kWhile,
set_cond_fn, builders);
}
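// Finalizes the command buffer. A primary command buffer recorded for the
// first time instantiates the executable graph (retrying once after trimming
// graph memory if the driver reports OOM); updates and nested command buffers
// only transition the state.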
absl::Status GpuCommandBuffer::Finalize() {
TF_RETURN_IF_ERROR(CheckNotFinalized());
#if !defined(TENSORFLOW_USE_ROCM)
TF_ASSIGN_OR_RETURN(auto node_count, GpuDriver::GraphGetNodeCount(graph_));
if (node_count == 0) {
GpuGraphNodeHandle empty_node_handle = nullptr;
TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
TF_RETURN_IF_ERROR(GpuDriver::GraphAddKernelNode(
&empty_node_handle, graph_, {}, "noop",
AsGpuKernel(&**noop)->gpu_function(), 1, 1, 1, 1, 1, 1, 0,
nullptr, nullptr));
}
#endif
if (state_ == State::kCreate && VLOG_IS_ON(10)) {
std::string path = tsl::io::GetTempFilename("dot");
auto printed = GpuDriver::GraphDebugDotPrint(
graph_, path.c_str(), VLOG_IS_ON(100));
if (VLOG_IS_ON(100) && printed.ok()) {
VLOG(100) << "Printed Gpu graph " << graph_ << " to: " << path << "\n"
<< *printed;
}
}
size_t num_nodes = 0, num_cond_cmd_buffers = 0;
for (auto& [_, execution_scope] : execution_scopes_) {
num_nodes += execution_scope.nodes.size();
num_cond_cmd_buffers += execution_scope.conditional_command_buffers.size();
}
if (mode_ == Mode::kPrimary && state_ == State::kCreate) {
GpuDriver::GraphInstantiateFlags flags;
uint64_t start_nanos = tsl::Env::Default()->NowNanos();
auto instantiated = GpuDriver::GraphInstantiate(&exec_, graph_, flags);
if (instantiated.code() == absl::StatusCode::kResourceExhausted) {
LOG(WARNING) << "Retry CUDA graph instantiation after OOM error"
<< "; execution_scopes: " << execution_scopes_.size()
<< "; nodes: " << num_nodes
<< "; conditionals: " << num_cond_cmd_buffers
<< "; alive executable graphs: " << AliveExecs();
TF_RETURN_IF_ERROR(parent_->TrimGraphMemory());
auto retry = GpuDriver::GraphInstantiate(&exec_, graph_, flags);
if (retry.code() == absl::StatusCode::kResourceExhausted) {
return absl::ResourceExhaustedError(absl::StrFormat(
"CUDA driver ran out of memory trying to instantiate CUDA graph "
"with %d nodes and %d conditionals (total of %d alive CUDA graphs "
"in the process). You can try to (a) Give more memory to CUDA "
"driver by reducing XLA_CLIENT_MEM_FRACTION (b) Disable "
"CUDA graph with 'XLA_FLAGS=--xla_gpu_enable_command_buffer=' "
"(empty set). Original error: %s",
num_nodes, num_cond_cmd_buffers, AliveExecs(), retry.message()));
} else {
TF_RETURN_IF_ERROR(retry);
}
} else {
TF_RETURN_IF_ERROR(instantiated);
}
uint64_t end_nanos = tsl::Env::Default()->NowNanos();
VLOG(5) << "Instantiated executable graph #" << NotifyExecCreated() << " "
<< exec_ << " in " << (end_nanos - start_nanos) / 1000 << " μs"
<< "; execution_scopes: " << execution_scopes_.size()
<< "; nodes: " << num_nodes
<< "; conditionals: " << num_cond_cmd_buffers
<< "; alive executable graphs: " << AliveExecs();
#if !defined(TENSORFLOW_USE_ROCM) && CUDA_VERSION < 12040
TF_RETURN_IF_ERROR(DisableBarriersExecution(exec_));
#endif
} else if (mode_ == Mode::kPrimary && state_ == State::kUpdate) {
VLOG(5) << "Finalize executable graph " << exec_ << " update #"
<< num_updates_++ << " "
<< "(alive executable graphs: " << AliveExecs() << ")";
} else if (mode_ == Mode::kNested) {
VLOG(5) << "Finalize nested command buffer without instantiating "
"executable graph";
}
state_ = State::kFinalized;
return absl::OkStatus();
}
absl::Status GpuCommandBuffer::Update() {
if (exec_ == nullptr) {
return absl::InternalError(
"Command buffer has to have a graph executable to be updated");
}
if (state_ != State::kFinalized) {
return absl::InternalError(
"Command buffer has to be finalized first before it can be updated");
}
VLOG(5) << "Begin " << (mode_ == Mode::kPrimary ? "primary" : "nested")
<< " command buffer update for executable graph " << exec_;
state_ = State::kUpdate;
for (auto& [_, execution_scope] : execution_scopes_) {
execution_scope.update_state = ExecutionScope::UpdateState();
}
return absl::OkStatus();
}
absl::Span<const GpuCommandBuffer::GpuGraphNodeInfo> GpuCommandBuffer::nodes(
ExecutionScopeId id) const {
if (auto it = execution_scopes_.find(id); it != execution_scopes_.end())
return it->second.nodes;
return {};
}
absl::Span<const GpuCommandBuffer::GpuGraphBarrierInfo>
GpuCommandBuffer::barriers(ExecutionScopeId id) const {
if (auto it = execution_scopes_.find(id); it != execution_scopes_.end())
return it->second.barriers;
return {};
}
absl::Status GpuCommandBuffer::Submit(Stream* stream) {
if (mode_ != CommandBuffer::Mode::kPrimary) {
return absl::InvalidArgumentError(
"Can't submit non-primary command buffer for execution");
}
VLOG(3) << "Launch command buffer executable graph " << exec_
<< " on a stream: " << stream;
return GpuDriver::GraphLaunch(exec_, AsGpuStreamValue(stream));
}
} | #include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/trace_command_buffer_factory.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace stream_executor::gpu {
using ExecutionScopeId = CommandBuffer::ExecutionScopeId;
static Platform* GpuPlatform() {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
return PlatformManager::PlatformWithName(name).value();
}
static MultiKernelLoaderSpec GetAddI32KernelSpec() {
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
return spec;
}
using AddI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
using MulI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
using IncAndCmpKernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<bool>, int32_t>;
using AddI32Ptrs3 = TypedKernelFactory<internal::Ptrs3<int32_t>>;
static constexpr auto nested = CommandBuffer::Mode::kNested;
static constexpr auto primary = CommandBuffer::Mode::kPrimary;
template <typename Info>
static std::vector<GpuGraphNodeHandle> Deps(Info info) {
if (auto deps = GpuDriver::GraphNodeGetDependencies(info.handle); deps.ok()) {
return *deps;
}
return {GpuGraphNodeHandle(0xDEADBEEF)};
}
template <typename... Infos>
static std::vector<GpuGraphNodeHandle> ExpectedDeps(Infos... info) {
return {info.handle...};
}
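// CUDA graph conditional nodes require CUDA 12.3+, so the conditional tests
// below are skipped on older toolkits and on non-CUDA platforms.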
static bool IsAtLeastCuda12300(
const stream_executor::StreamExecutor* executor) {
if (executor->GetPlatform()->id() != cuda::kCudaPlatformId) {
return false;
}
if (std::min({executor->GetDeviceDescription().runtime_version(),
executor->GetDeviceDescription().driver_version()}) <
SemanticVersion{12, 3, 0}) {
return false;
}
return true;
}
TEST(GpuCommandBufferTest, LaunchSingleKernel) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(CudaCommandBufferTest, TraceSingleKernel) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (platform->id() == rocm::kROCmPlatformId) {
GTEST_SKIP() << "Not supported on ROCM";
}
if (platform->id() == cuda::kCudaPlatformId &&
executor->GetDeviceDescription().runtime_version() <
SemanticVersion{12, 3, 0}) {
GTEST_SKIP() << "Command buffer tracing is not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(1, [&](const Kernel& kernel,
const KernelArgs& args) {
auto bufs = Cast<KernelArgsDeviceMemoryArray>(&args)->device_memory_args();
auto cast = [](auto m) { return reinterpret_cast<int32_t*>(m.opaque()); };
return PackKernelArgs(0, internal::Ptrs3<int32_t>{
cast(bufs[0]),
cast(bufs[1]),
cast(bufs[2]),
});
});
spec.AddInProcessSymbol(internal::GetAddI32Ptrs3Kernel(), "AddI32Ptrs3");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Ptrs3::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
KernelArgsDeviceMemoryArray args({a, b, c}, 0);
TF_ASSERT_OK_AND_ASSIGN(auto cmd_buffer, TraceCommandBufferFactory::Create(
executor,
[&](Stream* stream) {
return stream->Launch(
ThreadDim(), BlockDim(4),
*add, args);
},
primary));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, LaunchNestedCommandBuffer) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec = GetAddI32KernelSpec();
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
auto primary_cmd = executor->CreateCommandBuffer(primary).value();
auto nested_cmd = executor->CreateCommandBuffer(nested).value();
TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c));
TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
TF_ASSERT_OK(primary_cmd->Finalize());
TF_ASSERT_OK(primary_cmd->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
nested_cmd = executor->CreateCommandBuffer(nested).value();
TF_ASSERT_OK(nested_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d));
TF_ASSERT_OK(primary_cmd->Update());
TF_ASSERT_OK(primary_cmd->AddNestedCommandBuffer(*nested_cmd));
TF_ASSERT_OK(primary_cmd->Finalize());
TF_ASSERT_OK(primary_cmd->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, MemcpyDeviceToDevice) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&b, a, byte_length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
std::vector<int32_t> expected = {42, 42, 42, 42};
ASSERT_EQ(dst, expected);
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->MemcpyDeviceToDevice(&a, b, byte_length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(stream->Memset32(&a, 0, byte_length));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, Memset) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{42}, length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
std::vector<int32_t> expected = {42, 42, 42, 42};
ASSERT_EQ(dst, expected);
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->Memset(&a, uint32_t{43}, length));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
expected = {43, 43, 43, 43};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, Barriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 6; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&buffers[5], bit_pattern + 5, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
ASSERT_EQ(gpu_cmd_buffer->nodes().size(), 6);
ASSERT_EQ(gpu_cmd_buffer->barriers().size(), 6);
auto nodes = gpu_cmd_buffer->nodes();
auto barriers = gpu_cmd_buffer->barriers();
EXPECT_TRUE(barriers[0].is_barrier_node);
EXPECT_TRUE(Deps(barriers[0]).empty());
EXPECT_FALSE(barriers[1].is_barrier_node);
EXPECT_EQ(barriers[1].handle, nodes[0].handle);
EXPECT_FALSE(barriers[2].is_barrier_node);
EXPECT_FALSE(barriers[3].is_barrier_node);
EXPECT_EQ(barriers[2].handle, nodes[1].handle);
EXPECT_EQ(barriers[3].handle, nodes[1].handle);
EXPECT_TRUE(barriers[4].is_barrier_node);
EXPECT_TRUE(barriers[5].is_barrier_node);
EXPECT_EQ(Deps(barriers[4]), ExpectedDeps(nodes[2], nodes[3]));
EXPECT_EQ(Deps(barriers[5]), ExpectedDeps(nodes[4], nodes[5]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
expected = {43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
}
TEST(GpuCommandBufferTest, IndependentExecutionScopes) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 4; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> expected = {42, 43, 44, 45};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 2);
ASSERT_EQ(nodes1.size(), 2);
ASSERT_EQ(barriers0.size(), 1);
ASSERT_EQ(barriers1.size(), 1);
EXPECT_TRUE(barriers0[0].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
expected = {43, 44, 45, 46};
ASSERT_EQ(transfer_buffers(), expected);
}
TEST(GpuCommandBufferTest, ExecutionScopeBarriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
CommandBuffer::ExecutionScopeId s2 = CommandBuffer::ExecutionScopeId(2);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 7; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1, s2}));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s2, &buffers[6], bit_pattern + 6, 1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto nodes2 = gpu_cmd_buffer->nodes(s2);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
auto barriers2 = gpu_cmd_buffer->barriers(s2);
ASSERT_EQ(nodes0.size(), 3);
ASSERT_EQ(nodes1.size(), 3);
ASSERT_EQ(nodes2.size(), 1);
ASSERT_EQ(barriers0.size(), 2);
ASSERT_EQ(barriers1.size(), 2);
ASSERT_EQ(barriers2.size(), 2);
EXPECT_TRUE(barriers0[0].is_barrier_node && barriers0[1].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
EXPECT_TRUE(barriers2[0].is_barrier_node && barriers2[1].is_barrier_node);
EXPECT_TRUE(barriers0[1].handle == barriers1[1].handle);
EXPECT_TRUE(barriers1[1].handle == barriers2[1].handle);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
EXPECT_TRUE(Deps(barriers2[0]).empty());
EXPECT_EQ(Deps(barriers2[1]),
ExpectedDeps(barriers0[0], barriers1[0], barriers2[0]));
EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[1]));
EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]));
EXPECT_EQ(Deps(nodes2[0]), ExpectedDeps(barriers2[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
expected = {43, 44, 45, 46, 47, 48, 49};
ASSERT_EQ(transfer_buffers(), expected);
}
TEST(GpuCommandBufferTest, ExecutionScopeOneDirectionalBarriers) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 6; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
TF_CHECK_OK(stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)));
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[2], bit_pattern + 2, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[3], bit_pattern + 3, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0, s1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[4], bit_pattern + 4, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s1, &buffers[5], bit_pattern + 5, 1));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> expected = {42, 43, 44, 45, 46, 47};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 3);
ASSERT_EQ(nodes1.size(), 3);
ASSERT_EQ(barriers0.size(), 1);
ASSERT_EQ(barriers1.size(), 2);
EXPECT_TRUE(barriers0[0].is_barrier_node);
EXPECT_TRUE(barriers1[0].is_barrier_node && barriers1[1].is_barrier_node);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(Deps(barriers1[0]), ExpectedDeps(nodes1[0], nodes1[1]));
EXPECT_EQ(Deps(barriers1[1]), ExpectedDeps(barriers0[0], barriers1[0]));
EXPECT_EQ(Deps(nodes0[2]), ExpectedDeps(barriers0[0]));
EXPECT_EQ(Deps(nodes1[2]), ExpectedDeps(barriers1[1]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
expected = {43, 44, 45, 46, 47, 48};
ASSERT_EQ(transfer_buffers(), expected);
}
TEST(GpuCommandBufferTest, ConditionalIf) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> zeroes = {0, 0, 0, 0};
ASSERT_EQ(dst, zeroes);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, d);
};
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, ConditionalIfWithMemset) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (platform->id() == rocm::kROCmPlatformId) {
GTEST_SKIP() << "Not supported on ROCM";
}
if (platform->id() == cuda::kCudaPlatformId &&
executor->GetDeviceDescription().driver_version() <
SemanticVersion{12, 4, 0}) {
GTEST_SKIP() << "ConditionalsWithMemset are not supported before 12.4.";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&a, 0, byte_length));
CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Memset(&a, uint8_t{1}, byte_length);
};
TF_ASSERT_OK_AND_ASSIGN(auto cmd_buffer,
executor->CreateCommandBuffer(primary));
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(length, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
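  // Memset filled every byte of `a` with 0x01, so each int32 reads back as
  // 0x01010101 (1 << 24 | 1 << 16 | 1 << 8 | 1).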
std::vector<int32_t> expected(length, 1 << 24 | 1 << 16 | 1 << 8 | 1);
ASSERT_EQ(dst, expected);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&a, byte_length));
then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Memset(&b, uint8_t{1}, byte_length);
};
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->If(pred, then_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, ConditionalIfElse) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec mul_spec(3);
mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder then_builder = [&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
CommandBuffer::Builder else_builder = [&](CommandBuffer* else_cmd) {
return else_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, c);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->IfElse(pred, then_builder, else_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_add = {5, 5, 5, 5};
ASSERT_EQ(dst, expected_add);
constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_mul = {6, 6, 6, 6};
ASSERT_EQ(dst, expected_mul);
DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
else_builder = [&](CommandBuffer* else_cmd) {
return else_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, d);
};
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(cmd_buffer->IfElse(pred, then_builder, else_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, expected_mul);
}
TEST(GpuCommandBufferTest, ConditionalCaseEmptyGraph) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder branch0 = [&](CommandBuffer* branch0_cmd) {
return branch0_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
CommandBuffer::Builder branch1 = [&](CommandBuffer* branch1_cmd) {
return absl::OkStatus();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Case(index, {branch0, branch1}));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_add = {5, 5, 5, 5};
ASSERT_EQ(dst, expected_add);
TF_ASSERT_OK(stream->Memset32(&index, 1, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, expected_add);
TF_ASSERT_OK(stream->Memset32(&index, -1, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, expected_add);
TF_ASSERT_OK(stream->Memset32(&index, 2, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, expected_add);
}
class GpuCommandBufferCaseTest : public testing::TestWithParam<int> {
protected:
int GetNumCases() { return GetParam(); }
int GetEffectiveIndex(int i) {
return (i < 0 || i >= GetNumCases()) ? GetNumCases() - 1 : i;
}
};
TEST_P(GpuCommandBufferCaseTest, ConditionalMultiCase) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec mul_spec(3);
mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
constexpr int64_t kLength = 1;
int64_t byte_length = sizeof(int32_t) * kLength;
DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
const int kNumCases = GetNumCases();
std::vector<DeviceMemory<int32_t>> values;
std::vector<DeviceMemory<int32_t>> results;
std::vector<CommandBuffer::Builder> branches;
values.resize(kNumCases);
results.resize(kNumCases);
branches.resize(kNumCases);
for (int i = 0; i < kNumCases; ++i) {
values[i] = executor->AllocateArray<int32_t>(kLength, 0);
TF_ASSERT_OK(stream->Memset32(&values[i], i, byte_length));
results[i] = executor->AllocateArray<int32_t>(kLength, 0);
TF_ASSERT_OK(stream->Memset32(&results[i], 0, byte_length));
branches[i] = [&, i](CommandBuffer* branch_cmd) {
return branch_cmd->Launch(mul, ThreadDim(), BlockDim(kLength), values[i],
values[i], results[i]);
};
}
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Case(index, branches));
TF_ASSERT_OK(cmd_buffer->Finalize());
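  // Out-of-range indices (-1 and kNumCases) are expected to fall through to
  // the last branch, mirroring GetEffectiveIndex above.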
for (int i = -1; i <= kNumCases; ++i) {
TF_ASSERT_OK(stream->Memset32(&index, i, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
int effective_index = GetEffectiveIndex(i);
for (int z = 0; z < kNumCases; ++z) {
std::vector<int32_t> dst(kLength, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), results[z], byte_length));
std::vector<int32_t> expected;
expected.resize(kLength);
for (int p = 0; p < kLength; ++p) {
if (effective_index == z) {
expected[p] = effective_index * effective_index;
} else {
expected[p] = 0;
}
}
ASSERT_EQ(dst, expected)
<< "For result " << z << " after running case " << i;
TF_ASSERT_OK(stream->Memset32(&results[z], 0, byte_length));
}
}
}
INSTANTIATE_TEST_SUITE_P(ConditionalMultipleCaseTest, GpuCommandBufferCaseTest,
testing::Range(1, 32),
testing::PrintToStringParamName());
TEST(GpuCommandBufferTest, ConditionalCase) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec mul_spec(3);
mul_spec.AddInProcessSymbol(internal::GetMulI32Kernel(), "MulI32");
TF_ASSERT_OK_AND_ASSIGN(auto mul, MulI32Kernel::Create(executor, mul_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 2, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 3, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
CommandBuffer::Builder branch0 = [&](CommandBuffer* branch0_cmd) {
return branch0_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, c);
};
CommandBuffer::Builder branch1 = [&](CommandBuffer* branch1_cmd) {
return branch1_cmd->Launch(mul, ThreadDim(), BlockDim(4), a, b, c);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->Case(index, {branch0, branch1}));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_add = {5, 5, 5, 5};
ASSERT_EQ(dst, expected_add);
TF_ASSERT_OK(stream->Memset32(&index, 1, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected_mul = {6, 6, 6, 6};
ASSERT_EQ(dst, expected_mul);
TF_ASSERT_OK(stream->Memset32(&index, -1, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, expected_mul);
TF_ASSERT_OK(stream->Memset32(&index, 2, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, expected_mul);
}
TEST(GpuCommandBufferTest, ConditionalFor) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&loop_counter, 100, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
CommandBuffer::Builder body_builder = [&](CommandBuffer* body_cmd) {
return body_cmd->Launch(add, ThreadDim(), BlockDim(4), a, b, b);
};
int32_t num_iters = 10;
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->For(num_iters, loop_counter, body_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
std::vector<int32_t> expected = {10, 10, 10, 10};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, ConditionalWhile) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec icmp_spec(3);
icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
IncAndCmpKernel::Create(executor, icmp_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
static constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(stream->Memset32(&loop_counter, 0, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
int32_t num_iters = 10;
CommandBuffer::ExecutionScopeBuilder cond_builder =
[&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(),
loop_counter, pred, num_iters);
};
CommandBuffer::Builder body_builder = [&](CommandBuffer* body_cmd) {
return body_cmd->Launch(add, ThreadDim(), BlockDim(length), a, b, b);
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->While(pred, cond_builder, body_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
std::vector<int32_t> expected = {10, 10, 10, 10};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, DISABLED_WhileNestedConditional) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec icmp_spec(3);
icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
IncAndCmpKernel::Create(executor, icmp_spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<bool> pred_then = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
static constexpr bool kFalse = false;
static constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(stream->Memcpy(&pred_then, &kTrue, 1));
TF_ASSERT_OK(stream->Memset32(&loop_counter, 0, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
int32_t num_iters = 10;
CommandBuffer::Builder then_builder =
[&](CommandBuffer* then_cmd) {
return then_cmd->Launch(add, ThreadDim(), BlockDim(length), a, b, b);
};
auto nested_cmd = executor->CreateCommandBuffer(nested).value();
TF_ASSERT_OK(nested_cmd->If(pred_then, then_builder));
CommandBuffer::ExecutionScopeBuilder cond_builder =
[&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(length),
loop_counter, pred, num_iters);
};
CommandBuffer::Builder body_builder =
[&](CommandBuffer* body_cmd) -> absl::Status {
CHECK_OK(body_cmd->AddNestedCommandBuffer(*nested_cmd));
return absl::OkStatus();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(cmd_buffer->While(pred, cond_builder, body_builder));
TF_ASSERT_OK(cmd_buffer->Finalize());
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
std::vector<int32_t> expected = {10, 10, 10, 10};
ASSERT_EQ(dst, expected);
}
TEST(GpuCommandBufferTest, ConditionalIfInExecutionScope) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
constexpr bool kTrue = true;
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
std::vector<DeviceMemory<int32_t>> buffers;
for (size_t i = 0; i < 3; ++i) {
buffers.push_back(executor->AllocateArray<int32_t>(1, 0));
}
auto transfer_buffers = [&]() -> std::vector<int32_t> {
std::vector<int32_t> dst(buffers.size(), 0);
for (size_t i = 0; i < buffers.size(); ++i) {
stream->Memcpy(dst.data() + i, buffers[i], sizeof(int32_t)).IgnoreError();
}
return dst;
};
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[0], bit_pattern + 0, 1));
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &buffers[1], bit_pattern + 1, 1));
TF_RETURN_IF_ERROR(cmd_buffer->If(s1, pred, [&](CommandBuffer* then_cmd) {
return then_cmd->Memset(&buffers[2], bit_pattern + 2, 1);
}));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier(s0));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1}));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
std::vector<int32_t> expected = {42, 43, 44};
ASSERT_EQ(transfer_buffers(), expected);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 2);
ASSERT_EQ(nodes1.size(), 2);
ASSERT_EQ(barriers0.size(), 3);
ASSERT_EQ(barriers1.size(), 3);
EXPECT_EQ(Deps(barriers0[0]), ExpectedDeps(nodes0[0], nodes0[1]));
EXPECT_EQ(barriers0[0].handle, barriers0[1].handle);
EXPECT_EQ(barriers1[0].handle, nodes1[0].handle);
EXPECT_EQ(barriers1[1].handle, nodes1[1].handle);
EXPECT_TRUE(barriers0[2].handle == barriers1[2].handle);
EXPECT_EQ(Deps(barriers0[2]), ExpectedDeps(barriers0[1], nodes1[1]));
constexpr bool kFalse = false;
TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1));
TF_ASSERT_OK(stream->MemZero(&buffers[2], sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
expected = {42, 43, 0};
ASSERT_EQ(transfer_buffers(), expected);
}
TEST(GpuCommandBufferTest, ConditionalWhileInExecutionScope) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
if (!IsAtLeastCuda12300(executor)) {
GTEST_SKIP() << "CUDA graph conditionals are not supported";
}
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
CommandBuffer::ExecutionScopeId s0 = CommandBuffer::ExecutionScopeId(0);
CommandBuffer::ExecutionScopeId s1 = CommandBuffer::ExecutionScopeId(1);
MultiKernelLoaderSpec add_spec(3);
add_spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, add_spec));
MultiKernelLoaderSpec icmp_spec(3);
icmp_spec.AddInProcessSymbol(internal::GetIncAndCmpKernel(), "IncAndCmp");
TF_ASSERT_OK_AND_ASSIGN(auto inc_and_cmp,
IncAndCmpKernel::Create(executor, icmp_spec));
DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
DeviceMemory<int32_t> loop_counter = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(1, 0);
TF_ASSERT_OK(stream->MemZero(&loop_counter, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memset32(&a, 1, sizeof(int32_t)));
TF_ASSERT_OK(stream->MemZero(&b, sizeof(int32_t)));
auto record = [&](CommandBuffer* cmd_buffer, uint32_t bit_pattern,
int32_t num_iters) {
TF_RETURN_IF_ERROR(cmd_buffer->Memset(s0, &c, bit_pattern, 1));
TF_RETURN_IF_ERROR(cmd_buffer->While(
s1, pred,
[&](ExecutionScopeId id, CommandBuffer* cond_cmd) {
return cond_cmd->Launch(inc_and_cmp, id, ThreadDim(), BlockDim(),
loop_counter, pred, num_iters);
},
[&](CommandBuffer* body_cmd) {
return body_cmd->Launch(add, ThreadDim(), BlockDim(), a, b, b);
}));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier({s0, s1}));
return cmd_buffer->Finalize();
};
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
TF_ASSERT_OK(record(cmd_buffer.get(), 42, 10));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
int32_t b_dst, c_dst;
TF_ASSERT_OK(stream->Memcpy(&b_dst, b, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memcpy(&c_dst, c, sizeof(int32_t)));
EXPECT_EQ(b_dst, 10);
EXPECT_EQ(c_dst, 42);
GpuCommandBuffer* gpu_cmd_buffer = GpuCommandBuffer::Cast(cmd_buffer.get());
auto nodes0 = gpu_cmd_buffer->nodes(s0);
auto nodes1 = gpu_cmd_buffer->nodes(s1);
auto barriers0 = gpu_cmd_buffer->barriers(s0);
auto barriers1 = gpu_cmd_buffer->barriers(s1);
ASSERT_EQ(nodes0.size(), 1);
ASSERT_EQ(nodes1.size(), 3);
ASSERT_EQ(barriers0.size(), 2);
ASSERT_EQ(barriers1.size(), 4);
EXPECT_EQ(Deps(barriers0[1]), ExpectedDeps(nodes0[0], nodes1[2]));
TF_ASSERT_OK(cmd_buffer->Update());
TF_ASSERT_OK(record(cmd_buffer.get(), 43, 20));
TF_ASSERT_OK(stream->MemZero(&loop_counter, sizeof(int32_t)));
TF_ASSERT_OK(stream->MemZero(&b, sizeof(int32_t)));
TF_ASSERT_OK(cmd_buffer->Submit(stream.get()));
TF_ASSERT_OK(stream->Memcpy(&b_dst, b, sizeof(int32_t)));
TF_ASSERT_OK(stream->Memcpy(&c_dst, c, sizeof(int32_t)));
EXPECT_EQ(b_dst, 20);
EXPECT_EQ(c_dst, 43);
}
#define BENCHMARK_SIZES(NAME) \
BENCHMARK(NAME)->Arg(8)->Arg(32)->Arg(128)->Arg(512)->Arg(1024);
static void BM_CreateCommandBuffer(benchmark::State& state) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
for (auto s : state) {
auto cmd_buffer = executor->CreateCommandBuffer(nested).value();
for (int i = 1; i < state.range(0); ++i) {
CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
}
CHECK_OK(cmd_buffer->Finalize());
}
}
BENCHMARK_SIZES(BM_CreateCommandBuffer);
static void BM_TraceCommandBuffer(benchmark::State& state) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
for (auto s : state) {
auto launch_kernels = [&](Stream* stream) {
for (int i = 1; i < state.range(0); ++i) {
CHECK_OK(stream->ThenLaunch(ThreadDim(), BlockDim(4), add, b, b, b));
}
return absl::OkStatus();
};
CHECK_OK(
TraceCommandBufferFactory::Create(executor, launch_kernels, nested));
}
}
BENCHMARK_SIZES(BM_TraceCommandBuffer);
static void BM_UpdateCommandBuffer(benchmark::State& state) {
Platform* platform = GpuPlatform();
StreamExecutor* executor = platform->ExecutorForDevice(0).value();
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor, spec));
DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(1, 0);
auto cmd_buffer = executor->CreateCommandBuffer(primary).value();
for (int i = 1; i < state.range(0); ++i) {
CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
}
CHECK_OK(cmd_buffer->Finalize());
for (auto s : state) {
CHECK_OK(cmd_buffer->Update());
for (int i = 1; i < state.range(0); ++i) {
CHECK_OK(cmd_buffer->Launch(add, ThreadDim(), BlockDim(4), b, b, b));
}
CHECK_OK(cmd_buffer->Finalize());
}
}
BENCHMARK_SIZES(BM_UpdateCommandBuffer);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_command_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_command_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93741d2a-fa68-4282-bf83-455ca44616e7 | cpp | tensorflow/tensorflow | host_stream | third_party/xla/xla/stream_executor/host/host_stream.cc | third_party/xla/xla/stream_executor/host/host_stream_test.cc | #include "xla/stream_executor/host/host_stream.h"
#include <string.h>
#include <cfenv>
#include <cstdint>
#include <memory>
#include <queue>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/host/host_event.h"
#include "xla/stream_executor/host/host_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_common.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/env.h"
#include "tsl/platform/setround.h"
namespace stream_executor {
namespace host {
HostStream::HostStream(StreamExecutor* executor)
: StreamCommon(executor),
thread_(tsl::Env::Default()->StartThread({}, "host_executor",
[this]() { WorkLoop(); })) {}
HostStream::~HostStream() {
{
absl::MutexLock lock(&mu_);
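    // A null task is the sentinel that tells WorkLoop to exit; resetting
    // thread_ below joins the worker thread.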
work_queue_.push(nullptr);
}
thread_.reset();
parent()->DeallocateStream(this);
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([src_mem, dst_mem, size]() { memcpy(dst_mem, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(void* host_dst, const DeviceMemoryBase& gpu_src,
uint64_t size) {
void* src_mem = const_cast<void*>(gpu_src.opaque());
EnqueueTask([host_dst, src_mem, size]() { memcpy(host_dst, src_mem, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memcpy(DeviceMemoryBase* gpu_dst, const void* host_src,
uint64_t size) {
void* dst_mem = gpu_dst->opaque();
EnqueueTask([dst_mem, host_src, size]() { memcpy(dst_mem, host_src, size); });
return absl::OkStatus();
}
absl::Status HostStream::Memset32(DeviceMemoryBase* location, uint32_t pattern,
uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size, pattern]() { memset(gpu_mem, pattern, size); });
return absl::OkStatus();
}
absl::Status HostStream::MemZero(DeviceMemoryBase* location, uint64_t size) {
void* gpu_mem = location->opaque();
EnqueueTask([gpu_mem, size]() { memset(gpu_mem, 0, size); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Stream* other) {
auto event = std::make_shared<absl::Notification>();
static_cast<HostStream*>(other)->EnqueueTask([event]() { event->Notify(); });
EnqueueTask([event]() { event->WaitForNotification(); });
return absl::OkStatus();
}
absl::Status HostStream::WaitFor(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() { notification->WaitForNotification(); });
return absl::OkStatus();
}
bool HostStream::EnqueueTask(absl::AnyInvocable<void() &&> task) {
return EnqueueTaskWithStatus([task = std::move(task)]() mutable {
std::move(task)();
return absl::OkStatus();
});
}
absl::Status HostStream::RecordEvent(Event* event) {
std::shared_ptr<absl::Notification> notification =
static_cast<HostEvent*>(event)->notification();
EnqueueTask([notification]() {
CHECK(!notification->HasBeenNotified());
notification->Notify();
});
return absl::OkStatus();
}
absl::Status HostStream::DoHostCallbackWithStatus(
absl::AnyInvocable<absl::Status() &&> callback) {
if (EnqueueTaskWithStatus(std::move(callback))) {
return absl::OkStatus();
}
return absl::InternalError("Failed to host callback.");
}
bool HostStream::EnqueueTaskWithStatus(
absl::AnyInvocable<absl::Status() &&> task) {
CHECK(task != nullptr);
absl::MutexLock lock(&mu_);
work_queue_.push(std::move(task));
return true;
}
bool HostStream::WorkAvailable() { return !work_queue_.empty(); }
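// Worker thread loop: drains the queue in batches under the lock and runs
// tasks in FIFO order until it sees the null sentinel pushed by ~HostStream.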
void HostStream::WorkLoop() {
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
while (true) {
std::queue<absl::AnyInvocable<absl::Status() &&>> queue;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &HostStream::WorkAvailable));
std::swap(queue, work_queue_);
}
while (!queue.empty()) {
absl::AnyInvocable<absl::Status() &&>& fn = queue.front();
if (!fn) {
return;
}
status_.Update(std::move(fn)());
queue.pop();
}
}
}
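// Enqueues a marker task that snapshots and resets the accumulated status,
// then blocks the caller until that task has run.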
absl::Status HostStream::BlockUntilDone() {
absl::Notification done;
absl::Status status;
EnqueueTask([&done, &status, this]() {
status = status_;
status_ = absl::OkStatus();
done.Notify();
});
done.WaitForNotification();
return status;
}
absl::Status HostStream::Launch(const ThreadDim& thread_dims,
const BlockDim& block_dims,
const Kernel& kernel, const KernelArgs& args) {
const HostKernel* host_kernel = AsHostKernel(&kernel);
const KernelArgsDeviceMemoryArray* device_mem =
DynCast<KernelArgsDeviceMemoryArray>(&args);
if (device_mem != nullptr) {
return host_kernel->Launch(thread_dims, device_mem->device_memory_args());
}
return absl::UnimplementedError(
"Host kernel implements Launch method only for DeviceMemoryArray "
"arguments.");
}
}
} | #include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
TEST(HostStream, EnforcesFIFOOrder) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
absl::Mutex mu;
int expected = 0;
bool ok = true;
for (int i = 0; i < 2000; ++i) {
TF_ASSERT_OK(stream->DoHostCallback([i, &mu, &expected, &ok]() {
absl::MutexLock lock(&mu);
if (expected != i) {
ok = false;
}
++expected;
}));
}
TF_ASSERT_OK(stream->BlockHostUntilDone());
absl::MutexLock lock(&mu);
EXPECT_TRUE(ok);
}
TEST(HostStream, ReportsHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error!"); }));
auto status = stream->BlockHostUntilDone();
ASSERT_EQ(status.code(), tsl::error::INTERNAL);
ASSERT_EQ(status.message(), "error!");
}
TEST(HostStream, ReportsFirstHostCallbackError) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 1"); }));
TF_ASSERT_OK(stream->DoHostCallbackWithStatus(
[]() { return absl::InternalError("error 2"); }));
ASSERT_EQ(stream->BlockHostUntilDone().message(), "error 1");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_stream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_stream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
80165efb-1f70-430b-8c0d-87f23ff2fefc | cpp | tensorflow/tensorflow | host_kernel | third_party/xla/xla/stream_executor/host/host_kernel.cc | third_party/xla/xla/stream_executor/host/host_kernel_test.cc | #include "xla/stream_executor/host/host_kernel.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor::host {
using LaunchEvent = HostKernel::LaunchEvent;
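// Returns a reference to a process-lifetime, already-available launch event so
// that successful synchronous launches do not allocate a new async value.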
static tsl::AsyncValueRef<LaunchEvent> OkLaunchEvent() {
static tsl::AsyncValueOwningRef<LaunchEvent>* event = [] {
auto* storage = new tsl::internal::AsyncValueStorage<LaunchEvent>();
return new tsl::AsyncValueOwningRef<LaunchEvent>(
tsl::MakeAvailableAsyncValueRef<LaunchEvent>(*storage));
}();
return event->AsRef();
}
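// Packs device memory buffers into the C API argument structs (opaque pointer
// plus size in bytes).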
static absl::InlinedVector<SE_HOST_KernelArg, 8> ConvertBuffersToKernelArgs(
absl::Span<const DeviceMemoryBase> buffers) {
absl::InlinedVector<SE_HOST_KernelArg, 8> args(buffers.size());
for (size_t i = 0; i < buffers.size(); ++i) {
args[i].data = const_cast<void*>(buffers[i].opaque());
args[i].size = buffers[i].size();
}
return args;
}
namespace {
class HostKernelExecuteState {
public:
HostKernelExecuteState(HostKernel::TaskRunner task_runner,
SE_HOST_Kernel* kernel, ThreadDim thread_dims,
absl::Span<const SE_HOST_KernelArg> args);
~HostKernelExecuteState();
void Notify(absl::Status status);
void CallSync(uint64_t task_index);
void CallAsync(uint64_t start_index, uint64_t end_index);
tsl::AsyncValueRef<LaunchEvent> event() const { return event_; }
private:
static constexpr size_t kAtomicAlignment =
#if defined(__cpp_lib_hardware_interference_size)
std::hardware_destructive_interference_size;
#else
64;
#endif
SE_HOST_KernelThread Delinearize(uint64_t task_index);
HostKernel::TaskRunner task_runner_;
size_t num_tasks_;
SE_HOST_Kernel* kernel_;
SE_HOST_KernelThreadDim thread_dims_;
absl::InlinedVector<SE_HOST_KernelArg, 8> args_;
alignas(kAtomicAlignment) std::atomic<int64_t> counter_;
alignas(kAtomicAlignment) std::atomic<bool> abort_;
absl::Mutex abort_mutex_;
absl::Status abort_status_ ABSL_GUARDED_BY(abort_mutex_);
tsl::AsyncValueRef<LaunchEvent> event_;
};
}
HostKernel::HostKernel(std::shared_ptr<tsl::thread::ThreadPool> thread_pool)
: thread_pool_(thread_pool) {
}
HostKernel::HostKernel(unsigned arity, SE_HOST_Kernel* kernel,
std::shared_ptr<tsl::thread::ThreadPool> thread_pool)
: function_(std::make_unique<KernelFunctionPtr>(kernel)),
kernel_(function_->kernel()),
arity_(arity),
thread_pool_(thread_pool) {}
absl::Status HostKernel::Launch(
const ThreadDim& thread_dims,
absl::Span<const DeviceMemoryBase> buffers) const {
return Launch(thread_dims, ConvertBuffersToKernelArgs(buffers));
}
absl::Status HostKernel::Launch(
const ThreadDim& thread_dims,
absl::Span<const SE_HOST_KernelArg> args) const {
SE_HOST_KernelThreadDim kernel_thread_dims = {
thread_dims.x,
thread_dims.y,
thread_dims.z,
};
for (uint64_t z = 0; z < thread_dims.z; ++z) {
for (uint64_t y = 0; y < thread_dims.y; ++y) {
for (uint64_t x = 0; x < thread_dims.x; ++x) {
SE_HOST_KernelThread kernel_thread = {x, y, z};
SE_HOST_KernelCallFrame call_frame = {
&kernel_thread_dims, &kernel_thread, args.size(), args.data()};
SE_HOST_KernelError* error = (*kernel_)(&call_frame);
if (ABSL_PREDICT_FALSE(error != nullptr)) {
return absl::InternalError("Failed to call host kernel");
}
}
}
}
return absl::OkStatus();
}
tsl::AsyncValueRef<LaunchEvent> HostKernel::Launch(
const ThreadDim& thread_dims, absl::Span<const DeviceMemoryBase> buffers,
TaskRunner task_runner) const {
return Launch(thread_dims, ConvertBuffersToKernelArgs(buffers),
std::move(task_runner));
}
tsl::AsyncValueRef<LaunchEvent> HostKernel::Launch(
const ThreadDim& thread_dims, absl::Span<const SE_HOST_KernelArg> args,
TaskRunner task_runner) const {
size_t num_tasks = thread_dims.x * thread_dims.y * thread_dims.z;
CHECK_GT(num_tasks, 0) << "Number of tasks must be positive";
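  // Fast path: a single task is executed inline on the caller thread.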
if (ABSL_PREDICT_TRUE(num_tasks == 1)) {
absl::Status launched = Launch(thread_dims, args);
return ABSL_PREDICT_TRUE(launched.ok())
? OkLaunchEvent()
: tsl::MakeErrorAsyncValueRef(std::move(launched));
}
auto state = std::make_unique<HostKernelExecuteState>(
std::move(task_runner), kernel_, thread_dims, args);
state->CallAsync(0, num_tasks);
auto execute_event = state->event();
execute_event.AndThen([state = std::move(state)] {});
return execute_event;
}
HostKernelExecuteState::HostKernelExecuteState(
    HostKernel::TaskRunner task_runner, SE_HOST_Kernel* kernel,
ThreadDim thread_dims, absl::Span<const SE_HOST_KernelArg> args)
: task_runner_(std::move(task_runner)),
num_tasks_(thread_dims.x * thread_dims.y * thread_dims.z),
kernel_(kernel),
thread_dims_({thread_dims.x, thread_dims.y, thread_dims.z}),
args_(args.begin(), args.end()),
counter_(num_tasks_),
abort_(false),
event_(tsl::MakeConstructedAsyncValueRef<LaunchEvent>()) {}
HostKernelExecuteState::~HostKernelExecuteState() {
auto cnt = counter_.load(std::memory_order_acquire);
DCHECK_EQ(cnt, 0) << "Host kernel execute state is destroyed before all "
"tasks are completed";
}
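// Records a task completion; the last task to finish either publishes the
// first recorded error or marks the launch event as available.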
void HostKernelExecuteState::Notify(absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
absl::MutexLock lock(&abort_mutex_);
abort_.store(true, std::memory_order_relaxed);
abort_status_.Update(std::move(status));
}
bool is_done = counter_.fetch_sub(1, std::memory_order_acq_rel) == 1;
if (ABSL_PREDICT_TRUE(!is_done)) return;
if (ABSL_PREDICT_FALSE(abort_.load(std::memory_order_relaxed))) {
auto take_abort_status = [&] {
absl::MutexLock lock(&abort_mutex_);
return std::move(abort_status_);
};
event_.SetError(take_abort_status());
} else {
event_.SetStateConcrete();
}
}
void HostKernelExecuteState::CallSync(uint64_t task_index) {
CHECK_LT(task_index, num_tasks_) << "Task index out of range";
if (ABSL_PREDICT_FALSE(abort_.load(std::memory_order_relaxed))) {
Notify(absl::OkStatus());
return;
}
SE_HOST_KernelThread kernel_thread = Delinearize(task_index);
SE_HOST_KernelCallFrame call_frame = {&thread_dims_, &kernel_thread,
args_.size(), args_.data()};
SE_HOST_KernelError* error = (*kernel_)(&call_frame);
if (ABSL_PREDICT_TRUE(error == nullptr)) {
Notify(absl::OkStatus());
} else {
Notify(absl::InternalError(
absl::StrFormat("Failed to call host kernel: x=%d, y=%d, z=%d",
kernel_thread.x, kernel_thread.y, kernel_thread.z)));
}
}
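// Recursively splits the [start_index, end_index) range in half, offloading
// the upper half to the task runner and keeping the lower half, until a single
// task remains, which is executed inline.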
void HostKernelExecuteState::CallAsync(uint64_t start_index,
uint64_t end_index) {
CHECK_LT(start_index, end_index) << "Invalid task index range";
while (end_index - start_index > 1) {
uint64_t mid_index = (start_index + end_index) / 2;
task_runner_([self = this, mid_index, end_index] {
self->CallAsync(mid_index, end_index);
});
end_index = mid_index;
}
CallSync(start_index);
}
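// Converts a linear task index into (x, y, z) thread coordinates, with x as
// the fastest-varying dimension.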
SE_HOST_KernelThread HostKernelExecuteState::Delinearize(uint64_t task_index) {
uint64_t stride_z = thread_dims_.y * thread_dims_.x;
uint64_t stride_y = thread_dims_.x;
uint64_t z = task_index / stride_z;
task_index = task_index % stride_z;
uint64_t y = task_index / stride_y;
task_index = task_index % stride_y;
uint64_t x = task_index;
return SE_HOST_KernelThread{x, y, z};
}
} | #include "xla/stream_executor/host/host_kernel.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor::host {
static SE_HOST_KernelError* AddI32(const SE_HOST_KernelCallFrame* call_frame) {
const SE_HOST_KernelArg& lhs = call_frame->args[0];
const SE_HOST_KernelArg& rhs = call_frame->args[1];
const SE_HOST_KernelArg& out = call_frame->args[2];
int32_t* lhs_ptr = reinterpret_cast<int32_t*>(lhs.data);
int32_t* rhs_ptr = reinterpret_cast<int32_t*>(rhs.data);
int32_t* out_ptr = reinterpret_cast<int32_t*>(out.data);
const auto zstep = call_frame->thread_dims->x * call_frame->thread_dims->y;
const auto ystep = call_frame->thread_dims->x;
uint64_t i = call_frame->thread->x + call_frame->thread->y * ystep +
call_frame->thread->z * zstep;
*(out_ptr + i) = *(lhs_ptr + i) + *(rhs_ptr + i);
return nullptr;
}
static const char* llvm_kernel_add = R"(
%SE_HOST_KernelCallFrame = type { ptr, ptr, i64, ptr }
%struct.SE_HOST_KernelArg = type { ptr, i64 }
define ptr @LlvmAddI32(ptr noundef %0) {
%2 = getelementptr inbounds %SE_HOST_KernelCallFrame, ptr %0, i32 0, i32 3
%3 = load ptr, ptr %2, align 8
%4 = getelementptr inbounds %struct.SE_HOST_KernelArg, ptr %3, i64 1
%5 = getelementptr inbounds %struct.SE_HOST_KernelArg, ptr %3, i64 2
%6 = load ptr, ptr %3, align 8
%7 = load ptr, ptr %4, align 8
%8 = load ptr, ptr %5, align 8
%9 = getelementptr inbounds %SE_HOST_KernelCallFrame, ptr %0, i32 0, i32 1
%10 = load ptr, ptr %9, align 8
%11 = load i64, ptr %10, align 8
%12 = getelementptr inbounds i32, ptr %6, i64 %11
%13 = load i32, ptr %12, align 4
%14 = getelementptr inbounds i32, ptr %7, i64 %11
%15 = load i32, ptr %14, align 4
%16 = add nsw i32 %13, %15
%17 = getelementptr inbounds i32, ptr %8, i64 %11
store i32 %16, ptr %17, align 4
ret ptr null
}
)";
static absl::StatusOr<StreamExecutor*> NewStreamExecutor() {
TF_ASSIGN_OR_RETURN(auto platform, PlatformManager::PlatformWithName("Host"));
TF_ASSIGN_OR_RETURN(auto stream_exec,
platform->ExecutorForDevice(0));
return stream_exec;
}
TEST(HostKernelTest, InternalAddition1D) {
auto tp = std::make_shared<tsl::thread::ThreadPool>(tsl::Env::Default(),
"XLAEigen", 2);
HostKernel kernel(3, AddI32, tp);
std::vector<int32_t> lhs = {1, 2, 3, 4};
std::vector<int32_t> rhs = {5, 6, 7, 8};
std::vector<int32_t> out = {0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
TF_ASSERT_OK(kernel.Launch(ThreadDim(4), args));
std::vector<int32_t> expected = {6, 8, 10, 12};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, InternalAddition3D) {
auto tp = std::make_shared<tsl::thread::ThreadPool>(tsl::Env::Default(),
"XLAEigen", 2);
HostKernel kernel(3, AddI32, tp);
std::vector<int32_t> lhs = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
std::vector<int32_t> rhs = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};
std::vector<int32_t> out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
TF_ASSERT_OK(kernel.Launch(ThreadDim(2, 2, 3), args));
std::vector<int32_t> expected = {11, 13, 15, 17, 19, 21,
23, 25, 27, 29, 31, 33};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, Addition3D) {
std::vector<int32_t> lhs = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
std::vector<int32_t> rhs = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21};
std::vector<int32_t> out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(reinterpret_cast<void*>(AddI32), "Addition_kernel");
TF_ASSERT_OK_AND_ASSIGN(auto executor, NewStreamExecutor());
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add, executor->LoadKernel(spec));
const KernelArgsDeviceMemoryArray kargs{args, 0};
TF_ASSERT_OK(stream->Launch(ThreadDim(2, 2, 3), BlockDim(1), *add, kargs));
std::vector<int32_t> expected = {11, 13, 15, 17, 19, 21,
23, 25, 27, 29, 31, 33};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, JitAddition) {
std::vector<int32_t> lhs = {1, 2, 3, 4};
std::vector<int32_t> rhs = {5, 6, 7, 8};
std::vector<int32_t> out = {0, 0, 0, 0};
DeviceMemoryBase lhs_mem(lhs.data(), lhs.size() * sizeof(int32_t));
DeviceMemoryBase rhs_mem(rhs.data(), rhs.size() * sizeof(int32_t));
DeviceMemoryBase out_mem(out.data(), out.size() * sizeof(int32_t));
std::vector<DeviceMemoryBase> args = {lhs_mem, rhs_mem, out_mem};
MultiKernelLoaderSpec spec(3);
spec.AddLlvmHostKernel(llvm_kernel_add, "LlvmAddI32", "LlvmAddI32",
absl::Span<std::string>());
TF_ASSERT_OK_AND_ASSIGN(auto executor, NewStreamExecutor());
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add, executor->LoadKernel(spec));
const KernelArgsDeviceMemoryArray kargs{args, 0};
TF_ASSERT_OK(stream->Launch(ThreadDim(4), BlockDim(1), *add, kargs));
std::vector<int32_t> expected = {6, 8, 10, 12};
EXPECT_EQ(out, expected);
}
TEST(HostKernelTest, LaunchAsync) {
auto* no_op = +[](const SE_HOST_KernelCallFrame*) {
return static_cast<SE_HOST_KernelError*>(nullptr);
};
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
std::atomic<size_t> num_tasks = 0;
HostKernel::TaskRunner runner = [&](HostKernel::Task task) {
num_tasks.fetch_add(1, std::memory_order_relaxed);
thread_pool->Schedule(std::move(task));
};
HostKernel host_kernel(0, no_op);
auto event = host_kernel.Launch(ThreadDim(4, 4, 4),
absl::Span<const SE_HOST_KernelArg>(),
std::move(runner));
tsl::BlockUntilReady(event);
EXPECT_TRUE(event.IsConcrete());
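  // One task always runs inline on the calling thread, so the task runner sees
  // one task fewer than the 4x4x4 thread grid.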
EXPECT_EQ(num_tasks.load(std::memory_order_relaxed), 4 * 4 * 4 - 1);
}
TEST(HostKernelTest, LaunchAsyncError) {
auto* maybe_error = +[](const SE_HOST_KernelCallFrame* call_frame) {
if (call_frame->thread->x == 2 && call_frame->thread->z == 2) {
return reinterpret_cast<SE_HOST_KernelError*>(0xDEADBEEF);
}
return static_cast<SE_HOST_KernelError*>(nullptr);
};
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
std::atomic<size_t> num_tasks = 0;
HostKernel::TaskRunner runner = [&](HostKernel::Task task) {
num_tasks.fetch_add(1, std::memory_order_relaxed);
thread_pool->Schedule(std::move(task));
};
HostKernel host_kernel(0, maybe_error);
auto event = host_kernel.Launch(ThreadDim(4, 4, 4),
absl::Span<const SE_HOST_KernelArg>(),
std::move(runner));
tsl::BlockUntilReady(event);
ASSERT_TRUE(event.IsError());
EXPECT_TRUE(absl::StrContains(event.GetError().message(),
"Failed to call host kernel:"));
EXPECT_EQ(num_tasks.load(std::memory_order_relaxed), 4 * 4 * 4 - 1);
}
static SE_HOST_KernelError* NoOp(const SE_HOST_KernelCallFrame*) {
return nullptr;
}
static void BM_HostKernelSyncLaunch(benchmark::State& state) {
int32_t tdim_x = state.range(0);
HostKernel kernel(0, NoOp);
absl::Span<const SE_HOST_KernelArg> args;
for (auto _ : state) {
benchmark::DoNotOptimize(kernel.Launch(ThreadDim(tdim_x), args));
}
}
static void BM_HostKernelAsyncLaunch(benchmark::State& state) {
int32_t tdim_x = state.range(0);
auto thread_pool = std::make_shared<tsl::thread::ThreadPool>(
tsl::Env::Default(), "benchmark", tsl::port::MaxParallelism());
auto task_runner = [&thread_pool](HostKernel::Task task) {
thread_pool->Schedule(std::move(task));
};
HostKernel kernel(0, NoOp);
absl::Span<const SE_HOST_KernelArg> args;
for (auto _ : state) {
auto event = kernel.Launch(ThreadDim(tdim_x), args, task_runner);
tsl::BlockUntilReady(event);
}
}
BENCHMARK(BM_HostKernelSyncLaunch)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(4)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64);
BENCHMARK(BM_HostKernelAsyncLaunch)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(4)
->Arg(8)
->Arg(16)
->Arg(32)
->Arg(64);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/host/host_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67d9e88d-80b3-4696-a5ac-0ca0737cf6fa | cpp | tensorflow/tensorflow | cuda_collectives | third_party/xla/xla/stream_executor/cuda/cuda_collectives.cc | third_party/xla/xla/stream_executor/cuda/cuda_collectives_test.cc | #include "xla/stream_executor/cuda/cuda_collectives.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "third_party/nccl/nccl.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
namespace stream_executor::gpu {
absl::StatusOr<void*> CudaCollectives::CollectiveMemoryAllocate(
Context* context, uint64_t bytes) {
if (bytes == 0) return nullptr;
ScopedActivateContext activated(context);
void* ptr = nullptr;
ncclResult_t res = ncclMemAlloc(&ptr, bytes);
if (res != ncclSuccess) {
return absl::InternalError(absl::StrFormat(
"failed to allocate %s (%llu bytes) from device collective memory: %s, "
"Last NCCL warning(error) log entry (may be unrelated): %s",
tsl::strings::HumanReadableNumBytes(bytes), bytes,
ncclGetErrorString(res), ncclGetLastError(nullptr)));
}
VLOG(2) << "Allocated collective memory " << ptr << " for context " << context
<< " of " << bytes << " bytes";
return ptr;
}
absl::Status CudaCollectives::CollectiveMemoryDeallocate(
Context* context, void* location) {
ScopedActivateContext activation(context);
ncclResult_t res = ncclMemFree(location);
if (res != ncclSuccess) {
return absl::InternalError(absl::StrFormat(
"failed to free device collective memory at %p; result: %s, Last NCCL "
"warning(error) log entry (may be unrelated): %s",
location, ncclGetErrorString(res), ncclGetLastError(nullptr)));
}
VLOG(2) << "Deallocated collective memory " << location << " for context "
<< context;
return absl::OkStatus();
}
} | #include "xla/stream_executor/cuda/cuda_collectives.h"
#include <cstddef>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
TEST(CudaCollectivesTest, CollectiveMemoryAllocation) {
if (!xla::gpu::NcclApi::HasNcclSupport()) {
GTEST_SKIP() << "Compiled without NCCL support";
}
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
constexpr size_t kAllocateSize = 1024;
TF_ASSERT_OK_AND_ASSIGN(void* memory,
CudaCollectives::CollectiveMemoryAllocate(
gpu_executor->gpu_context(), kAllocateSize));
EXPECT_THAT(gpu_executor->GetPointerMemorySpace(memory),
IsOkAndHolds(MemoryType::kDevice));
EXPECT_THAT(CudaCollectives::CollectiveMemoryDeallocate(
gpu_executor->gpu_context(), memory),
IsOk());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_collectives.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_collectives_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
603e6420-5efb-4b8e-9e71-70db41222d1d | cpp | tensorflow/tensorflow | cuda_kernel | third_party/xla/xla/stream_executor/cuda/cuda_kernel.cc | third_party/xla/xla/stream_executor/cuda/cuda_kernel_test.cc | #include "xla/stream_executor/cuda/cuda_kernel.h"
#include <cstddef>
#include <cstdint>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/launch_dim.h"
namespace stream_executor {
namespace gpu {
absl::StatusOr<int32_t> CudaKernel::GetMaxOccupiedBlocksPerCore(
ThreadDim threads, size_t dynamic_shared_memory_bytes) const {
int32_t threads_per_block = threads.x * threads.y * threads.z;
VLOG(3) << "Get kernel block occupancy: " << name()
<< "; threads_per_block: " << threads_per_block
<< "; dynamic_shared_memory_bytes: " << dynamic_shared_memory_bytes;
return GpuDriver::GetMaxOccupiedBlocksPerCore(
gpu_executor_->gpu_context(), gpu_function_, threads_per_block,
dynamic_shared_memory_bytes);
}
}
} | #include "xla/stream_executor/cuda/cuda_kernel.h"
#include <gtest/gtest.h>
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_runtime.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Ge;
using tsl::testing::IsOkAndHolds;
TEST(CudaKernelTest, GetMaxOccupiedBlocksPerCore) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
CudaKernel cuda_kernel(gpu_executor);
cuda_kernel.set_arity(3);
TF_ASSERT_OK_AND_ASSIGN(
CUfunction function,
CudaRuntime::GetFuncBySymbol(internal::GetAddI32Kernel()));
cuda_kernel.set_gpu_function(function);
EXPECT_EQ(cuda_kernel.Arity(), 3);
EXPECT_EQ(cuda_kernel.gpu_function(), function);
EXPECT_THAT(cuda_kernel.GetMaxOccupiedBlocksPerCore(
ThreadDim(1, 1, 1), 0),
IsOkAndHolds(Ge(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
504bdc2c-48c0-44c6-9489-d2383b5d9d67 | cpp | tensorflow/tensorflow | cuda_platform | third_party/xla/xla/stream_executor/cuda/cuda_platform.cc | third_party/xla/xla/stream_executor/cuda/cuda_platform_test.cc | #include "xla/stream_executor/cuda/cuda_platform.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/cuda_executor.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform/initialize.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace stream_executor {
namespace gpu {
CudaPlatform::CudaPlatform() : name_("CUDA") {}
Platform::Id CudaPlatform::id() const { return cuda::kCudaPlatformId; }
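// Initializes the driver once and caches the device count; reports -1 if
// driver initialization fails.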
int CudaPlatform::VisibleDeviceCount() const {
static const int num_devices = [] {
if (!GpuDriver::Init().ok()) return -1;
return GpuDriver::GetDeviceCount();
}();
return num_devices;
}
const std::string& CudaPlatform::Name() const { return name_; }
absl::StatusOr<std::unique_ptr<DeviceDescription>>
CudaPlatform::DescriptionForDevice(int ordinal) const {
return CudaExecutor::CreateDeviceDescription(ordinal);
}
absl::StatusOr<StreamExecutor*> CudaPlatform::ExecutorForDevice(int ordinal) {
return executor_cache_.GetOrCreate(
ordinal, [this, ordinal]() { return GetUncachedExecutor(ordinal); });
}
absl::StatusOr<StreamExecutor*> CudaPlatform::FindExisting(int ordinal) {
return executor_cache_.Get(ordinal);
}
absl::StatusOr<std::unique_ptr<StreamExecutor>>
CudaPlatform::GetUncachedExecutor(int ordinal) {
auto executor = std::make_unique<CudaExecutor>(this, ordinal);
TF_RETURN_IF_ERROR(executor->Init());
return std::move(executor);
}
}
static void InitializeCudaPlatform() {
TF_CHECK_OK(
PlatformManager::RegisterPlatform(std::make_unique<gpu::CudaPlatform>()));
}
}
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(
cuda_platform, stream_executor::InitializeCudaPlatform()); | #include "xla/stream_executor/cuda/cuda_platform.h"
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
TEST(CudaPlatformTest, FindExistingWorks) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
CHECK_GT(platform->VisibleDeviceCount(), 0);
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
EXPECT_FALSE(platform->FindExisting(i).ok());
}
absl::flat_hash_map<int, StreamExecutor*> executors;
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(auto executor, platform->ExecutorForDevice(i));
executors[i] = executor;
}
EXPECT_EQ(executors.size(), platform->VisibleDeviceCount());
for (int i = 0; i < platform->VisibleDeviceCount(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(auto executor, platform->FindExisting(i));
EXPECT_EQ(executor, executors[i]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_platform.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_platform_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4ea70b1-9d47-4f07-af1c-0c17c795a89a | cpp | tensorflow/tensorflow | cuda_executor | third_party/xla/xla/stream_executor/cuda/cuda_executor.cc | third_party/xla/xla/stream_executor/cuda/cuda_executor_test.cc | #include "xla/stream_executor/cuda/cuda_executor.h"
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include "absl/log/check.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/cuda/cuda_collectives.h"
#include "xla/stream_executor/cuda/cuda_event.h"
#include "xla/stream_executor/cuda/cuda_kernel.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/cuda/cuda_runtime.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/cuda/cuda_version_parser.h"
#include "xla/stream_executor/cuda/delay_kernel.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/event_based_timer.h"
#include "xla/stream_executor/fft.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_event.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_semaphore.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/read_numa_node.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/plugin_registry.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
namespace gpu {
namespace {
bool ShouldLaunchDelayKernel() {
static bool value = [] {
const char* blocking = std::getenv("CUDA_LAUNCH_BLOCKING");
return !blocking || std::string_view{blocking} != "1";
}();
return value;
}
absl::Status FuncGetAttribute(CUfunction_attribute attribute, CUfunction func,
int* attribute_value) {
return cuda::ToStatus(
cuFuncGetAttribute(attribute_value, attribute, func),
absl::StrCat("Failed to query kernel attribute: ", attribute));
}
}
static CUdeviceptr AsCudaDevicePtr(const DeviceMemoryBase& gpu_mem) {
return reinterpret_cast<CUdeviceptr>(gpu_mem.opaque());
}
static CUdeviceptr AsCudaDevicePtr(DeviceMemoryBase* gpu_mem) {
return AsCudaDevicePtr(*gpu_mem);
}
CudaExecutor::~CudaExecutor() {
CHECK(kernel_to_gpu_binary_.empty()) << "GpuExecutor has live kernels.";
CHECK(gpu_binary_to_module_.empty()) << "GpuExecutor has loaded modules.";
if (gpu_context() != nullptr) {
GpuDriver::DestroyContext(gpu_context());
}
}
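// Initializes the CUDA driver, resolves the device for this ordinal, creates
// its context, and caches the compute capability and delay-kernel support.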
absl::Status CudaExecutor::Init() {
TF_RETURN_IF_ERROR(GpuDriver::Init());
TF_RETURN_IF_ERROR(GpuDriver::GetDevice(device_ordinal(), &device_));
Context* context;
TF_RETURN_IF_ERROR(
GpuDriver::CreateContext(device_ordinal(), device_, &context));
set_context(context);
TF_RETURN_IF_ERROR(
GpuDriver::GetComputeCapability(&cc_major_, &cc_minor_, device_));
TF_ASSIGN_OR_RETURN(delay_kernels_supported_, DelayKernelIsSupported());
return absl::OkStatus();
}
absl::StatusOr<bool> CudaExecutor::DelayKernelIsSupported() {
TF_ASSIGN_OR_RETURN(int status,
GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, device_));
return static_cast<bool>(status);
}
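// Loads (or reuses) a CUmodule for the given CUBIN image, keeping a
// per-binary reference count in gpu_binary_to_module_.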
absl::Status CudaExecutor::LoadModuleFromCuBin(const char* cubin,
CUmodule* module) {
uint64_t module_refcount;
std::tie(*module, module_refcount) = gpu_binary_to_module_[cubin];
if (*module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadCubin(gpu_context(), cubin, module));
module_refcount = 1;
VLOG(3) << "Loaded CUBIN " << static_cast<const void*>(cubin)
<< " as module " << *module;
} else {
++module_refcount;
VLOG(3) << "CUBIN " << static_cast<const void*>(cubin)
<< " is already loaded as module " << *module;
}
gpu_binary_to_module_[cubin] = {*module, module_refcount};
return absl::OkStatus();
}
absl::Status CudaExecutor::LoadModuleFromPtx(const char* ptx,
CUmodule* module) {
uint64_t module_refcount;
std::tie(*module, module_refcount) = gpu_binary_to_module_[ptx];
if (*module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadPtx(gpu_context(), ptx, module));
VLOG(3) << "Loaded PTX " << static_cast<const void*>(ptx) << " as module "
<< *module;
module_refcount = 1;
} else {
++module_refcount;
VLOG(3) << "PTX " << static_cast<const void*>(ptx)
<< " is already loaded as module " << module;
}
gpu_binary_to_module_[ptx] = {*module, module_refcount};
return absl::OkStatus();
}
absl::Status CudaExecutor::LoadModuleFromHsaco(const char* hsaco,
CUmodule* module) {
return absl::InternalError(
"Feature not supported on CUDA platform (LoadModuleFromHsaco)");
}
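// Builds a CudaKernel from one of three loader specs: an in-memory CUBIN, PTX
// text selected by compute capability, or an in-process symbol resolved via
// the CUDA runtime.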
absl::StatusOr<std::unique_ptr<Kernel>> CudaExecutor::LoadKernel(
const MultiKernelLoaderSpec& spec) {
auto cuda_kernel = std::make_unique<CudaKernel>(this);
CUmodule module;
const std::string* kernel_name;
if (spec.has_cuda_cubin_in_memory()) {
absl::MutexLock lock{&in_memory_modules_mu_};
kernel_name = &spec.cuda_cubin_in_memory().kernel_name();
const char* cubin = reinterpret_cast<const char*>(
spec.cuda_cubin_in_memory().cubin_bytes().data());
TF_RETURN_IF_ERROR(LoadModuleFromCuBin(cubin, &module));
kernel_to_gpu_binary_[cuda_kernel.get()] = cubin;
} else if (spec.has_cuda_ptx_in_memory()) {
kernel_name = &spec.cuda_ptx_in_memory().kernel_name();
if (cc_major_ == 0 && cc_minor_ == 0) {
return absl::InternalError("Compute capability not set");
}
const char* ptx = spec.cuda_ptx_in_memory().text(cc_major_, cc_minor_);
if (ptx == nullptr) {
ptx = spec.cuda_ptx_in_memory().default_text();
}
if (ptx == nullptr) {
LOG(FATAL) << "Loader spec has no ptx for kernel " << *kernel_name;
}
absl::MutexLock lock{&in_memory_modules_mu_};
TF_RETURN_IF_ERROR(LoadModuleFromPtx(ptx, &module));
kernel_to_gpu_binary_[cuda_kernel.get()] = ptx;
} else if (spec.has_in_process_symbol()) {
kernel_name = &spec.in_process_symbol().kernel_name();
void* symbol = spec.in_process_symbol().symbol();
VLOG(2) << "Resolve CUDA kernel " << *kernel_name
<< " from symbol pointer: " << symbol;
TF_ASSIGN_OR_RETURN(
GpuFunctionHandle function,
CudaRuntime::GetFuncBySymbol(spec.in_process_symbol().symbol()));
cuda_kernel->set_gpu_function(function);
} else {
return absl::InternalError("No method of loading CUDA kernel provided");
}
VLOG(3) << "LoadKernel on kernel : " << *kernel_name;
if (!spec.has_in_process_symbol()) {
VLOG(2) << "getting function " << *kernel_name << " from module " << module;
GpuFunctionHandle function;
TF_RETURN_IF_ERROR(GpuDriver::GetModuleFunction(
gpu_context(), module, kernel_name->c_str(), &function));
cuda_kernel->set_gpu_function(function);
}
cuda_kernel->set_name(*kernel_name);
cuda_kernel->set_arity(spec.arity());
KernelMetadata kernel_metadata;
TF_RETURN_IF_ERROR(GetKernelMetadata(cuda_kernel.get(), &kernel_metadata));
cuda_kernel->set_metadata(kernel_metadata);
cuda_kernel->set_name(*kernel_name);
cuda_kernel->set_args_packing(spec.kernel_args_packing());
return std::move(cuda_kernel);
}
absl::StatusOr<std::unique_ptr<EventBasedTimer>>
CudaExecutor::CreateEventBasedTimer(GpuStream* stream, bool use_delay_kernel) {
GpuSemaphore semaphore{};
if (use_delay_kernel && ShouldLaunchDelayKernel() &&
delay_kernels_supported_) {
TF_ASSIGN_OR_RETURN(semaphore, LaunchDelayKernel(stream));
}
TF_ASSIGN_OR_RETURN(auto start_event, CreateGpuEvent(true));
TF_ASSIGN_OR_RETURN(auto stop_event, CreateGpuEvent(true));
TF_RETURN_IF_ERROR(start_event->Record(stream->gpu_stream()));
return std::make_unique<GpuTimer>(gpu_context(), std::move(start_event),
std::move(stop_event), stream,
std::move(semaphore));
}
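// Drops one reference to the module backing gpu_binary and unloads it from the
// context once the count reaches zero; returns false if it was never loaded.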
bool CudaExecutor::UnloadGpuBinary(const void* gpu_binary) {
auto module_it = gpu_binary_to_module_.find(gpu_binary);
if (gpu_binary_to_module_.end() == module_it) {
VLOG(3) << "No loaded CUDA module for " << gpu_binary;
return false;
}
auto& module = module_it->second.first;
auto& refcount = module_it->second.second;
VLOG(3) << "Found CUDA module " << module << " with refcount " << refcount;
if (--refcount == 0) {
VLOG(3) << "Unloading CUDA module " << module;
GpuDriver::UnloadModule(gpu_context(), module);
gpu_binary_to_module_.erase(module_it);
}
return true;
}
void CudaExecutor::UnloadKernel(const Kernel* kernel) {
VLOG(3) << "Unloading kernel " << kernel << " : " << kernel->name();
absl::MutexLock lock{&in_memory_modules_mu_};
auto gpu_binary_it = kernel_to_gpu_binary_.find(kernel);
if (kernel_to_gpu_binary_.end() == gpu_binary_it) {
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has never been loaded.";
return;
}
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has loaded GPU code " << gpu_binary_it->second;
UnloadGpuBinary(gpu_binary_it->second);
kernel_to_gpu_binary_.erase(gpu_binary_it);
}
absl::Status CudaExecutor::LoadModule(const MultiModuleLoaderSpec& spec,
ModuleHandle* module_handle) {
CUmodule cu_module;
if (spec.has_cuda_cubin_in_memory()) {
absl::MutexLock lock{&in_memory_modules_mu_};
TF_RETURN_IF_ERROR(LoadModuleFromCuBin(
reinterpret_cast<const char*>(spec.cuda_cubin_in_memory().data()),
&cu_module));
*module_handle = ModuleHandle(const_cast<void*>(
static_cast<const void*>(spec.cuda_cubin_in_memory().data())));
return absl::OkStatus();
} else if (spec.has_cuda_ptx_in_memory()) {
if (cc_major_ == 0 && cc_minor_ == 0) {
return absl::InternalError("Compute capability not set");
}
if (!spec.cuda_ptx_in_memory()) {
return absl::InternalError("PTX not found in spec");
}
absl::MutexLock lock{&in_memory_modules_mu_};
TF_RETURN_IF_ERROR(
LoadModuleFromPtx(spec.cuda_ptx_in_memory(), &cu_module));
*module_handle = ModuleHandle(
const_cast<void*>(static_cast<const void*>(spec.cuda_ptx_in_memory())));
return absl::OkStatus();
}
return absl::InternalError("No method of loading CUDA module provided");
}
bool CudaExecutor::UnloadModule(ModuleHandle module_handle) {
const char* gpu_binary = reinterpret_cast<const char*>(module_handle.id());
absl::MutexLock lock{&in_memory_modules_mu_};
return UnloadGpuBinary(gpu_binary);
}
namespace {
absl::uint128 Fingerprint128(const absl::string_view s) {
auto fp = tsl::Fingerprint128(s);
return absl::MakeUint128(fp.high64, fp.low64);
}
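// FP32 units per SM by compute capability: 192 on sm_3x, 64 on sm_60, sm_7x
// and sm_80, and 128 otherwise.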
int fpus_per_core(int cc_major, int cc_minor) {
int n = 128;
if (cc_major == 3) {
n = 192;
} else if ((cc_major == 6 && cc_minor == 0) || (cc_major == 7) ||
(cc_major == 8 && cc_minor == 0)) {
n = 64;
}
return n;
}
}
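// Returns a device buffer holding the given bytes, sharing an earlier
// allocation whenever an identical constant (keyed by 128-bit fingerprint) is
// still alive.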
absl::StatusOr<std::shared_ptr<DeviceMemoryBase>>
CudaExecutor::CreateOrShareConstant(Stream* stream,
absl::Span<const uint8_t> content) {
absl::MutexLock lock{&shared_constants_mu_};
absl::uint128 fingerprint = Fingerprint128(absl::string_view(
reinterpret_cast<const char*>(content.data()), content.size()));
auto insert_result = shared_constants_.insert(
{fingerprint, std::weak_ptr<DeviceMemoryBase>()});
auto it = insert_result.first;
bool was_already_in_cache = !insert_result.second;
std::shared_ptr<DeviceMemoryBase> shared_constant;
if (was_already_in_cache) {
shared_constant = it->second.lock();
}
if (shared_constant == nullptr) {
auto new_constant = std::make_unique<DeviceMemoryBase>(
Allocate(content.size(), 0));
if (new_constant->opaque() == nullptr) {
return absl::InternalError(absl::StrFormat(
"Failed to allocate %d bytes for new constant", content.size()));
}
TF_RETURN_IF_ERROR(
stream->Memcpy(new_constant.get(), content.data(), content.size()));
absl::Status status = stream->BlockHostUntilDone();
if (!status.ok()) {
Deallocate(new_constant.get());
status.Update(absl::InternalError(absl::StrFormat(
"Memcpy to device address %p failed", new_constant->opaque())));
return status;
}
shared_constant = std::shared_ptr<DeviceMemoryBase>(
new_constant.release(), [this](DeviceMemoryBase* p) {
Deallocate(p);
delete p;
});
it->second = std::weak_ptr<DeviceMemoryBase>(shared_constant);
}
return shared_constant;
}
absl::Status CudaExecutor::GetKernelMetadata(GpuKernel* cuda_kernel,
KernelMetadata* kernel_metadata) {
int value;
TF_RETURN_IF_ERROR(FuncGetAttribute(CU_FUNC_ATTRIBUTE_NUM_REGS,
cuda_kernel->gpu_function(), &value));
kernel_metadata->set_registers_per_thread(value);
TF_RETURN_IF_ERROR(FuncGetAttribute(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES,
cuda_kernel->gpu_function(), &value));
kernel_metadata->set_shared_memory_bytes(value);
return absl::OkStatus();
}
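// memory_space 1 requests collective (NCCL) memory, MemoryType::kHost requests
// pinned host memory, and the default space 0 is ordinary device memory.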
DeviceMemoryBase CudaExecutor::Allocate(uint64_t size, int64_t memory_space) {
if (memory_space == 1) {
auto result =
CudaCollectives::CollectiveMemoryAllocate(gpu_context(), size);
if (!result.ok()) {
LOG(ERROR) << result.status();
}
return DeviceMemoryBase(nullptr, 0);
} else if (memory_space ==
static_cast<int64_t>(stream_executor::MemoryType::kHost)) {
return DeviceMemoryBase(GpuDriver::HostAllocate(gpu_context(), size), size);
}
CHECK_EQ(memory_space, 0);
return DeviceMemoryBase(GpuDriver::DeviceAllocate(gpu_context(), size), size);
}
void CudaExecutor::Deallocate(DeviceMemoryBase* mem) {
auto status_or_memory_space = GetPointerMemorySpace(mem->opaque());
if (!status_or_memory_space.ok()) {
LOG(ERROR) << status_or_memory_space.status();
return;
}
auto memory_space = status_or_memory_space.value();
if (memory_space == MemoryType::kHost) {
GpuDriver::HostDeallocate(gpu_context(), mem->opaque());
} else {
GpuDriver::DeviceDeallocate(gpu_context(), mem->opaque());
}
}
bool CudaExecutor::SynchronizeAllActivity() {
return GpuDriver::SynchronizeContext(gpu_context()).ok();
}
bool CudaExecutor::HostMemoryRegister(void* location, uint64_t size) {
VLOG(1) << "Called StreamExecutor::HostMemoryRegister(data=" << location
<< ")";
return GpuDriver::HostRegister(gpu_context(), location, size);
}
bool CudaExecutor::HostMemoryUnregister(void* location) {
VLOG(1) << "Called StreamExecutor::HostUnregister(data=" << location << ")";
return GpuDriver::HostUnregister(gpu_context(), location);
}
absl::Status CudaExecutor::SynchronousMemZero(DeviceMemoryBase* location,
uint64_t size) {
if (reinterpret_cast<uintptr_t>(location->opaque()) % 4 == 0 &&
size % 4 == 0) {
return GpuDriver::SynchronousMemsetUint32(
gpu_context(), AsCudaDevicePtr(location), 0x0, size / 4);
}
return GpuDriver::SynchronousMemsetUint8(
gpu_context(), AsCudaDevicePtr(location), 0x0, size);
}
absl::Status CudaExecutor::SynchronousMemcpy(DeviceMemoryBase* gpu_dst,
const void* host_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyH2D(
gpu_context(), AsCudaDevicePtr(gpu_dst), host_src, size);
}
absl::Status CudaExecutor::SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyD2H(gpu_context(), host_dst,
AsCudaDevicePtr(gpu_src), size);
}
void CudaExecutor::DeallocateStream(Stream* stream) {
{
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
dnn_->NotifyStreamDestroyed(stream);
}
}
GpuStream* gpu_stream = AsGpuStream(stream);
absl::MutexLock l(&alive_gpu_streams_mu_);
alive_gpu_streams_.erase(gpu_stream->gpu_stream());
}
absl::Status CudaExecutor::BlockHostUntilDone(Stream* stream) {
return GpuDriver::SynchronizeStream(gpu_context(), AsGpuStreamValue(stream));
}
blas::BlasSupport* CudaExecutor::AsBlas() {
absl::MutexLock lock(&mu_);
if (blas_ != nullptr) {
return blas_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::BlasFactory> status =
registry->GetFactory<PluginRegistry::BlasFactory>(cuda::kCudaPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve BLAS factory: "
<< status.status().message();
return nullptr;
}
auto blas = status.value()(this);
blas_.reset(blas);
return blas_.get();
}
dnn::DnnSupport* CudaExecutor::AsDnn() {
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
return dnn_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::DnnFactory> status =
registry->GetFactory<PluginRegistry::DnnFactory>(cuda::kCudaPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve DNN factory: "
<< status.status().message();
return nullptr;
}
auto dnn = status.value()(this);
dnn_.reset(dnn);
return dnn_.get();
}
fft::FftSupport* CudaExecutor::AsFft() {
absl::MutexLock lock(&mu_);
if (fft_ != nullptr) {
return fft_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::FftFactory> status =
registry->GetFactory<PluginRegistry::FftFactory>(cuda::kCudaPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve FFT factory: "
<< status.status().message();
return nullptr;
}
auto fft = status.value()(this);
fft_.reset(fft);
return fft_.get();
}
bool CudaExecutor::CanEnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* cuda_other = static_cast<GpuExecutor*>(other);
return GpuDriver::CanEnablePeerAccess(gpu_context(),
cuda_other->gpu_context());
}
absl::Status CudaExecutor::EnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* cuda_other = static_cast<GpuExecutor*>(other);
return GpuDriver::EnablePeerAccess(gpu_context(), cuda_other->gpu_context());
}
bool CudaExecutor::DeviceMemoryUsage(int64_t* free, int64_t* total) const {
return GpuDriver::GetDeviceMemoryInfo(gpu_context(), free, total);
}
absl::StatusOr<DeviceMemoryBase> CudaExecutor::GetSymbol(
const std::string& symbol_name, ModuleHandle module_handle) {
void* mem = nullptr;
size_t bytes = 0;
CHECK(static_cast<bool>(module_handle));
{
absl::MutexLock lock{&in_memory_modules_mu_};
auto it = gpu_binary_to_module_.find(module_handle.id());
CHECK(it != gpu_binary_to_module_.end());
GpuModuleHandle gpu_module_handle = it->second.first;
CHECK(gpu_module_handle != nullptr);
TF_RETURN_IF_ERROR(GpuDriver::GetModuleSymbol(
gpu_context(), gpu_module_handle, symbol_name.c_str(),
reinterpret_cast<CUdeviceptr*>(&mem), &bytes));
return DeviceMemoryBase(mem, bytes);
}
return absl::NotFoundError(
absl::StrCat("Check if module containing symbol ", symbol_name,
" is loaded (module_handle = ",
reinterpret_cast<uintptr_t>(module_handle.id()), ")"));
}
absl::Status FillBlockDimLimit(GpuDeviceHandle device,
BlockDim* block_dim_limit) {
int x, y, z;
TF_RETURN_IF_ERROR(GpuDriver::GetGridLimits(&x, &y, &z, device));
block_dim_limit->x = x;
block_dim_limit->y = y;
block_dim_limit->z = z;
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<GpuEvent>> CudaExecutor::CreateGpuEvent(
bool allow_timing) {
auto gpu_event = std::make_unique<CudaEvent>(gpu_context());
TF_RETURN_IF_ERROR(gpu_event->Init(allow_timing));
return std::move(gpu_event);
}
absl::StatusOr<std::unique_ptr<Event>> CudaExecutor::CreateEvent() {
return CreateGpuEvent(false);
}
absl::StatusOr<std::unique_ptr<Stream>> CudaExecutor::CreateStream(
std::optional<std::variant<StreamPriority, int>> priority) {
TF_ASSIGN_OR_RETURN(auto event, CreateGpuEvent(false));
auto stream = std::make_unique<GpuStream>(this, std::move(event), priority);
absl::MutexLock l(&alive_gpu_streams_mu_);
TF_RETURN_IF_ERROR(stream->Init());
auto gpu_stream = stream->gpu_stream();
alive_gpu_streams_[gpu_stream] = stream.get();
return std::move(stream);
}
absl::StatusOr<std::unique_ptr<CommandBuffer>>
CudaExecutor::CreateCommandBuffer(CommandBuffer::Mode mode) {
VLOG(2) << "Create CUDA command buffer (CUDA graph)";
GpuGraphHandle graph = nullptr;
TF_RETURN_IF_ERROR(GpuDriver::CreateGraph(&graph));
return std::make_unique<GpuCommandBuffer>(mode, this, graph);
}
absl::Status CudaExecutor::TrimGraphMemory() {
return GpuDriver::DeviceGraphMemTrim(device_);
}
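// Assembles a DeviceDescription for the ordinal by querying driver/runtime
// versions, PCI bus and NUMA placement, clocks, memory sizes, and per-SM limits.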
absl::StatusOr<std::unique_ptr<DeviceDescription>>
CudaExecutor::CreateDeviceDescription(int device_ordinal) {
GpuDeviceHandle device;
TF_RETURN_IF_ERROR(GpuDriver::GetDevice(device_ordinal, &device));
int cc_major;
int cc_minor;
TF_RETURN_IF_ERROR(
GpuDriver::GetComputeCapability(&cc_major, &cc_minor, device));
DeviceDescription desc;
desc.set_driver_version(
ParseCudaVersion(GpuDriver::GetDriverVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
desc.set_runtime_version(
ParseCudaVersion(CudaRuntime::GetRuntimeVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
desc.set_compile_time_toolkit_version(
ParseCudaVersion(CUDA_VERSION).value_or(SemanticVersion{0, 0, 0}));
{
std::string pci_bus_id = GpuDriver::GetPCIBusID(device);
pci_bus_id = absl::AsciiStrToLower(pci_bus_id);
desc.set_pci_bus_id(pci_bus_id);
int numa_node = ReadNumaNode(pci_bus_id, device_ordinal);
desc.set_numa_node(numa_node);
}
{
desc.set_threads_per_block_limit(
GpuDriver::GetDeviceAttribute(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
device)
.value());
ThreadDim thread_dim_limit;
thread_dim_limit.x = GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, device)
.value();
thread_dim_limit.y = GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, device)
.value();
thread_dim_limit.z = GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z, device)
.value();
desc.set_thread_dim_limit(thread_dim_limit);
}
int sm_clock_khz =
GpuDriver::GetDeviceAttribute(CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device)
.value();
desc.set_clock_rate_ghz(static_cast<float>(sm_clock_khz) / 1e6);
{
bool ecc_enabled = false;
(void)GpuDriver::IsEccEnabled(device, &ecc_enabled);
desc.set_ecc_enabled(ecc_enabled);
}
uint64_t device_memory_size = static_cast<uint64_t>(-1);
(void)GpuDriver::GetDeviceTotalMemory(device, &device_memory_size);
desc.set_device_memory_size(device_memory_size);
int64_t l2_cache_bytes =
GpuDriver::GetDeviceAttribute(CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, device)
.value();
desc.set_l2_cache_size(l2_cache_bytes);
absl::StatusOr<int> mem_clock_khz = GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device_ordinal);
absl::StatusOr<int> mem_bus_width_bits = GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, device_ordinal);
if (mem_clock_khz.ok() && mem_bus_width_bits.ok()) {
desc.set_memory_bandwidth(2 * int64_t{mem_clock_khz.value()} * 1000 *
int64_t{mem_bus_width_bits.value()} / 8);
}
{
BlockDim block_dim_limit;
TF_RETURN_IF_ERROR(FillBlockDimLimit(device, &block_dim_limit));
desc.set_block_dim_limit(block_dim_limit);
}
{
std::string device_name;
TF_RETURN_IF_ERROR(GpuDriver::GetDeviceName(device, &device_name));
desc.set_name(device_name);
}
desc.set_platform_version(
absl::StrCat("Compute Capability ", cc_major, ".", cc_minor));
desc.set_device_address_bits(64);
desc.set_device_vendor("NVIDIA Corporation");
desc.set_cuda_compute_capability(cc_major, cc_minor);
desc.set_shared_memory_per_core(
GpuDriver::GetMaxSharedMemoryPerCore(device).value());
desc.set_shared_memory_per_block(
GpuDriver::GetMaxSharedMemoryPerBlock(device).value());
desc.set_shared_memory_per_block_optin(
GpuDriver::GetMaxSharedMemoryPerBlockOptin(device).value());
int core_count = GpuDriver::GetMultiprocessorCount(device).value();
desc.set_core_count(core_count);
desc.set_fpus_per_core(fpus_per_core(cc_major, cc_minor));
desc.set_threads_per_core_limit(
GpuDriver::GetMaxThreadsPerMultiprocessor(device).value());
desc.set_registers_per_block_limit(
GpuDriver::GetMaxRegistersPerBlock(device).value());
desc.set_threads_per_warp(GpuDriver::GetThreadsPerWarp(device).value());
desc.set_registers_per_core_limit(
GpuDriver::GetDeviceAttribute(
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR, device)
.value());
auto value_or = [](const auto& status_or, auto default_val) {
if (status_or.ok()) return *status_or;
return default_val;
};
desc.set_model_str(absl::StrFormat(
"sm_%d.%d with %dB RAM, %d cores, %dKHz clock, %dKHz mem clock, %dB L2$",
cc_major, cc_minor, device_memory_size, core_count, sm_clock_khz,
value_or(mem_clock_khz, 0), l2_cache_bytes));
return std::make_unique<DeviceDescription>(std::move(desc));
}
}
} | #include "xla/stream_executor/cuda/cuda_executor.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Ge;
using testing::IsEmpty;
using testing::Not;
using testing::VariantWith;
TEST(CudaExecutorTest, CreateDeviceDescription) {
TF_ASSERT_OK(GpuDriver::Init());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DeviceDescription> result,
CudaExecutor::CreateDeviceDescription(0));
constexpr SemanticVersion kNullVersion{0, 0, 0};
EXPECT_NE(result->runtime_version(), kNullVersion);
EXPECT_NE(result->driver_version(), kNullVersion);
EXPECT_NE(result->compile_time_toolkit_version(), kNullVersion);
EXPECT_THAT(result->platform_version(), Not(IsEmpty()));
EXPECT_THAT(result->name(), Not(IsEmpty()));
EXPECT_THAT(result->model_str(), Not(IsEmpty()));
EXPECT_THAT(result->device_vendor(), "NVIDIA Corporation");
EXPECT_THAT(
result->gpu_compute_capability(),
VariantWith<CudaComputeCapability>(Ge(CudaComputeCapability{1, 0})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
420151f8-c17c-4762-bc14-ceeecde6a1bc | cpp | tensorflow/tensorflow | cuda_version_parser | third_party/xla/xla/stream_executor/cuda/cuda_version_parser.cc | third_party/xla/xla/stream_executor/cuda/cuda_version_parser_test.cc | #include "xla/stream_executor/cuda/cuda_version_parser.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
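// CUDA packs versions as major * 1000 + minor * 10; the trailing digit is
// ignored and the patch level is always reported as 0.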
absl::StatusOr<SemanticVersion> ParseCudaVersion(int cuda_version) {
if (cuda_version < 0) {
return absl::InvalidArgumentError("Version numbers cannot be negative!");
}
int major = cuda_version / 1000;
int minor = (cuda_version % 1000) / 10;
return SemanticVersion(major, minor, 0);
}
} | #include "xla/stream_executor/cuda/cuda_version_parser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(CudaVersionParserTest, ValidVersion) {
EXPECT_THAT(ParseCudaVersion(12040), IsOkAndHolds(SemanticVersion{12, 4, 0}));
}
TEST(CudaVersionParserTest, LeastSignificantDigitIsIgnored) {
EXPECT_THAT(ParseCudaVersion(12041), IsOkAndHolds(SemanticVersion{12, 4, 0}));
}
TEST(CudaVersionParserTest, NegativeIntegerIsNotAValidVersion) {
EXPECT_THAT(ParseCudaVersion(-42),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_version_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_version_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5fda2d8d-3b4a-4536-afb2-12c1cb9ba632 | cpp | tensorflow/tensorflow | cuda_driver | third_party/xla/xla/stream_executor/cuda/cuda_driver.cc | third_party/xla/xla/stream_executor/cuda/cuda_driver_test.cc | #include "xla/stream_executor/cuda/cuda_driver.h"
#include <stdint.h>
#include <stdlib.h>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/context_map.h"
#include "xla/stream_executor/gpu/gpu_diagnostics.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor {
namespace gpu {
namespace {
absl::StatusOr<CUdevice> DeviceFromContext(Context* context) {
ScopedActivateContext activated{context};
CUdevice device = -1;
auto status = cuda::ToStatus(cuCtxGetDevice(&device));
if (status.ok()) {
return device;
}
return status;
}
CUcontext CurrentContextOrDie() {
CUcontext current = nullptr;
TF_CHECK_OK(cuda::ToStatus(cuCtxGetCurrent(¤t),
"Failed to query current context"));
return current;
}
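// Process-wide map from CUcontext to GpuContext; the lookup callback recovers
// the device ordinal that owns a device pointer via cuPointerGetAttribute.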
ContextMap<CUcontext, GpuContext>* GetContextMap() {
static ContextMap<CUcontext, GpuContext>* context_map =
new ContextMap<CUcontext, GpuContext>([](void* ptr) {
int device_ordinal;
absl::Status status = cuda::ToStatus(
cuPointerGetAttribute(static_cast<void*>(&device_ordinal),
CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,
reinterpret_cast<CUdeviceptr>(ptr)));
if (!status.ok()) {
LOG(FATAL) << "Not able to get the device_ordinal for ptr: " << ptr
<< ". Error: " << status;
}
return device_ordinal;
});
return context_map;
}
CUcontext CurrentContext() {
CUcontext current = CurrentContextOrDie();
if (current != nullptr && !GetContextMap()->Has(current)) {
LOG(FATAL) << "current context was not created by the StreamExecutor "
"cuda_driver API: "
<< current
<< "; a CUDA runtime call "
"was likely performed without using a StreamExecutor context";
}
return current;
}
tsl::thread::ThreadPool* GetDriverExecutor() {
static tsl::thread::ThreadPool* thread_pool = new tsl::thread::ThreadPool(
tsl::Env::Default(), tsl::ThreadOptions(), "cuda_driver", 1);
return thread_pool;
}
}
void GpuContext::SetActive() {
TF_CHECK_OK(
cuda::ToStatus(cuCtxSetCurrent(context_), "Failed setting context"));
}
bool GpuContext::IsActive() const { return CurrentContext() == context_; }
namespace {
static absl::Status InternalInit() {
absl::Status status =
cuda::ToStatus(cuInit(0), "Failed call to cuInit");
if (status.ok()) {
return status;
}
LOG(ERROR) << "failed call to cuInit: " << status;
Diagnostician::LogDiagnosticInformation();
return status;
}
const char kScheduleSpinString[] = "spin";
const char kScheduleYieldString[] = "yield";
const char kScheduleBlockingSyncString[] = "blocking_sync";
int GetFlagsFromEnv() {
const char* gpu_schedule_string =
std::getenv("TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE");
if (gpu_schedule_string == nullptr) {
return 0;
}
unsigned device_flags = 0;
if (strcmp(kScheduleSpinString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_SPIN;
} else if (strcmp(kScheduleYieldString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_YIELD;
} else if (strcmp(kScheduleBlockingSyncString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_BLOCKING_SYNC;
} else {
LOG(QFATAL) << "Unknown option for environment variable "
"TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE "
<< gpu_schedule_string << " should be one of {"
<< kScheduleBlockingSyncString << ", " << kScheduleSpinString
<< ", " << kScheduleYieldString << "}";
}
return device_flags;
}
}
absl::Status GpuDriver::Init() {
static absl::Status* init_retval = [] {
return new absl::Status(InternalInit());
}();
return *init_retval;
}
absl::Status GpuDriver::GetDevice(int device_ordinal, CUdevice* device) {
return cuda::ToStatus(cuDeviceGet(device, device_ordinal),
"Failed call to cuDeviceGet");
}
absl::Status GpuDriver::GetDeviceName(CUdevice device,
std::string* device_name) {
static const size_t kCharLimit = 64;
absl::InlinedVector<char, 4> chars(kCharLimit);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDeviceGetName(chars.begin(), kCharLimit - 1, device),
"Failed to get device name"));
chars[kCharLimit - 1] = '\0';
*device_name = chars.begin();
return absl::OkStatus();
}
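// Retains the device's primary context (logging an error if it is already
// active with different scheduling flags) and registers it in the context map.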
absl::Status GpuDriver::CreateContext(int device_ordinal, CUdevice device,
Context** context) {
*context = nullptr;
int flags = GetFlagsFromEnv();
unsigned int former_primary_context_flags;
int former_primary_context_is_active;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDevicePrimaryCtxGetState(device, &former_primary_context_flags,
&former_primary_context_is_active)));
if (former_primary_context_flags != flags) {
if (former_primary_context_is_active) {
LOG(ERROR)
<< "The primary context is active and has a different flag set ("
<< former_primary_context_flags << ") than the desired flag set ("
<< flags << ").";
} else {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDevicePrimaryCtxSetFlags(device, flags)));
}
}
CUcontext former_context = CurrentContextOrDie();
CUcontext new_context;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDevicePrimaryCtxRetain(&new_context, device)));
if (former_context != nullptr) {
CUdevice former_device;
if (cuCtxGetDevice(&former_device) == CUDA_SUCCESS) {
if (former_device == device) {
if (former_context == new_context) {
VLOG(2) << "The primary context " << former_context << " for device "
<< device
<< " exists before initializing the StreamExecutor.";
} else {
LOG(WARNING) << "A non-primary context " << former_context
<< " for device " << device
<< " exists before initializing the StreamExecutor. The "
<< "primary context is now " << new_context << ". We "
<< "haven't verified StreamExecutor works with that.";
}
}
} else {
LOG(ERROR) << "Failed to get the device of the current context "
<< former_context;
}
}
TF_RETURN_IF_ERROR(cuda::ToStatus(cuCtxSetCurrent(former_context)));
*context = GetContextMap()->Add(new_context, device_ordinal);
CHECK(*context != nullptr)
<< "success in this call must entail non-null result";
VLOG(2) << "created or reused context " << new_context << " for this thread";
return absl::OkStatus();
}
void GpuDriver::DestroyContext(Context* context) {
if (context == nullptr) {
return;
}
GpuContext* cuda_context = tensorflow::down_cast<GpuContext*>(context);
auto status = cuda::ToStatus(cuCtxPushCurrent(cuda_context->context()));
if (!status.ok()) {
LOG(ERROR) << "failed to Push CUDA context; leaking: " << status;
}
CUdevice device;
cuCtxGetDevice(&device);
cuCtxPopCurrent(nullptr);
status = cuda::ToStatus(cuDevicePrimaryCtxRelease(device));
if (!status.ok()) {
LOG(ERROR) << "failed to release CUDA context; leaking: " << status;
}
GetContextMap()->Remove(cuda_context->context());
}
absl::Status GpuDriver::CreateGraph(CUgraph* graph) {
VLOG(2) << "Create new CUDA graph";
TF_RETURN_IF_ERROR(cuda::ToStatus(cuGraphCreate(graph, 0),
"Failed to create CUDA graph"));
VLOG(2) << "Created CUDA graph " << *graph;
return absl::OkStatus();
}
absl::Status GpuDriver::DestroyGraph(CUgraph graph) {
VLOG(2) << "Destroy CUDA graph " << graph;
return cuda::ToStatus(cuGraphDestroy(graph), "Failed to destroy CUDA graph");
}
static std::string_view StreamCaptureModeToString(
GpuDriver::StreamCaptureMode mode) {
switch (mode) {
case GpuDriver::StreamCaptureMode::kGlobal:
return "global";
case GpuDriver::StreamCaptureMode::kThreadLocal:
return "threadlocal";
case GpuDriver::StreamCaptureMode::kRelaxed:
return "relaxed";
}
}
absl::Status GpuDriver::StreamBeginCapture(CUstream stream,
StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode";
return cuda::ToStatus(cuStreamBeginCapture(stream, cu_mode),
"Failed to begin stream capture");
}
absl::Status GpuDriver::StreamBeginCaptureToGraph(CUstream stream,
CUgraph graph,
StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
#if CUDA_VERSION >= 12030
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode to graph " << graph;
return cuda::ToStatus(
cuStreamBeginCaptureToGraph(stream, graph,
nullptr,
nullptr,
0, cu_mode),
"Failed to begin stream capture to graph");
#else
return absl::UnimplementedError(
"StreamBeginCaptureToGraph is not implemented");
#endif
}
absl::Status GpuDriver::StreamEndCapture(CUstream stream, CUgraph* graph) {
VLOG(2) << "End stream " << stream << " capture";
return cuda::ToStatus(cuStreamEndCapture(stream, graph),
"Failed to end stream capture");
}
absl::Status GpuDriver::GraphInstantiate(CUgraphExec* exec, CUgraph graph,
const GraphInstantiateFlags& flags) {
VLOG(2) << "Instantiate CUDA executable graph from graph " << graph << " ("
<< "auto_free_on_launch=" << flags.auto_free_on_launch << ", "
<< "device_launch=" << flags.device_launch << ", "
<< "use_node_priority=" << flags.use_node_prirotiy << ", "
<< "upload=" << flags.upload << ")";
#if CUDA_VERSION >= 12000
uint64_t cu_flags = 0;
if (flags.auto_free_on_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH;
if (flags.use_node_prirotiy)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY;
if (flags.device_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH;
if (flags.upload) cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD;
return cuda::ToStatus(cuGraphInstantiate(exec, graph, cu_flags),
"Failed to instantiate CUDA graph");
#else
return cuda::ToStatus(cuGraphInstantiate(exec, graph, nullptr, nullptr, 0),
"Failed to instantiate CUDA graph");
#endif
}
absl::Status GpuDriver::GraphLaunch(CUgraphExec exec, CUstream stream) {
VLOG(2) << "Launching CUDA executable graph " << exec << " on a stream "
<< stream;
return cuda::ToStatus(cuGraphLaunch(exec, stream),
"Failed to launch CUDA graph");
}
absl::Status GpuDriver::GraphNodeSetEnabled(CUgraphExec exec, CUgraphNode node,
bool enabled) {
unsigned value = enabled ? 1 : 0;
VLOG(2) << "Set CUDA executable graph " << exec << " node " << node
<< " enabled flag to " << value;
return cuda::ToStatus(cuGraphNodeSetEnabled(exec, node, value),
"Failed to set CUDA graph node enabled flag");
}
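// Updates an instantiated executable graph in place and maps the CUDA result
// enum, whose API changed in CUDA 12, onto GraphExecUpdateResult.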
absl::Status GpuDriver::GraphExecUpdate(CUgraphExec exec, CUgraph graph,
GraphExecUpdateResultInfo* result) {
VLOG(2) << "Update CUDA graph executable " << exec << " with graph " << graph;
#if CUDA_VERSION >= 12000
CUgraphExecUpdateResultInfo cu_result;
memset(&cu_result, 0, sizeof(cu_result));
CUresult err_code = cuGraphExecUpdate(exec, graph, &cu_result);
auto cu_result_enum = cu_result.result;
if (cu_result.errorFromNode) {
result->error_from_node = cu_result.errorFromNode;
}
if (cu_result.errorNode) {
result->error_node = cu_result.errorNode;
}
#else
CUgraphExecUpdateResult cu_result;
CUresult err_code = cuGraphExecUpdate(exec, graph, nullptr, &cu_result);
auto cu_result_enum = cu_result;
#endif
switch (cu_result_enum) {
case CU_GRAPH_EXEC_UPDATE_SUCCESS:
result->result = GraphExecUpdateResult::kSuccess;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR:
result->result = GraphExecUpdateResult::kError;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED:
result->result = GraphExecUpdateResult::kTopologyChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED:
result->result = GraphExecUpdateResult::kNodeTypeChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED:
result->result = GraphExecUpdateResult::kFunctionChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED:
result->result = GraphExecUpdateResult::kParametersChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED:
result->result = GraphExecUpdateResult::kNotSupported;
break;
#if CUDA_VERSION >= 12000
case CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE:
result->result = GraphExecUpdateResult::kUnsupportedFunctionChange;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED:
result->result = GraphExecUpdateResult::kAttributesChanged;
break;
#endif
default:
return absl::InternalError("Unknown graph update result");
}
return cuda::ToStatus(err_code, "Failed to update CUDA graph");
}
absl::StatusOr<std::vector<GpuGraphNodeHandle>>
GpuDriver::GraphNodeGetDependencies(GpuGraphNodeHandle node) {
VLOG(2) << "Get CUDA graph node " << node << " dependencies";
std::vector<CUgraphNode> dependencies;
size_t num_dependencies = 0;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphNodeGetDependencies(node, nullptr, &num_dependencies),
"Failed to get CUDA graph node depedencies size"));
dependencies.resize(num_dependencies, nullptr);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphNodeGetDependencies(node, dependencies.data(), &num_dependencies),
"Failed to get CUDA graph node depedencies"));
return dependencies;
}
absl::Status GpuDriver::DestroyGraphExec(CUgraphExec exec) {
VLOG(2) << "Destroying CUDA executable graph " << exec;
return cuda::ToStatus(cuGraphExecDestroy(exec),
"Failed to destroy CUDA executable graph");
}
absl::StatusOr<std::string> GpuDriver::GraphDebugDotPrint(
CUgraph graph, const char* path, bool return_printed_graph) {
#if CUDA_VERSION >= 12000
VLOG(2) << "Print CUDA graph " << graph << " debug dot file to " << path;
int flags = CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuGraphDebugDotPrint(graph, path, flags),
"Failed to print gpu graph debug file"));
if (return_printed_graph) {
std::string data;
if (tsl::ReadFileToString(tsl::Env::Default(), path, &data).ok()) {
return data;
} else {
LOG(WARNING) << "failed to read gpu graph debug file " << path;
}
}
#endif
return std::string(path);
}
absl::Status GpuDriver::DeviceGraphMemTrim(CUdevice device) {
VLOG(2) << "Trim CUDA device graph memory " << device;
return cuda::ToStatus(cuDeviceGraphMemTrim(device),
"Failed to trim device graph memory");
}
absl::StatusOr<bool> GpuDriver::StreamIsCapturing(CUstream stream) {
VLOG(2) << "Checking if stream " << stream << " is capturing";
CUstreamCaptureStatus status;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuStreamIsCapturing(stream, &status),
"Failed to check stream capturing status"));
return status == CU_STREAM_CAPTURE_STATUS_ACTIVE;
}
absl::Status GpuDriver::GraphConditionalHandleCreate(
GpuGraphConditionalHandle* handle, CUgraph graph, Context* context,
unsigned int default_launch_value, unsigned int flags) {
VLOG(2) << "Create conditional handle for a graph " << graph
<< "; context: " << context
<< "; default_launch_value: " << default_launch_value
<< "; flags: " << flags;
#if CUDA_VERSION >= 12030
return cuda::ToStatus(
cuGraphConditionalHandleCreate(
handle, graph, tensorflow::down_cast<GpuContext*>(context)->context(),
default_launch_value, flags),
"Failed to create conditional handle for a CUDA graph");
#else
return absl::UnimplementedError(
"CUDA graph conditional nodes are not implemented");
#endif
}
static std::string ConditionalTypeToString(
GpuDriver::GpuGraphConditionalNodeParams::Type type) {
switch (type) {
case GpuDriver::GpuGraphConditionalNodeParams::Type::kIf:
return "IF";
case GpuDriver::GpuGraphConditionalNodeParams::Type::kWhile:
return "WHILE";
}
}
absl::StatusOr<GpuDriver::GpuGraphNodeResult> GpuDriver::GraphAddNode(
CUgraphNode* node, CUgraph graph, absl::Span<const CUgraphNode> deps,
const GpuGraphNodeParams& params) {
#if CUDA_VERSION >= 12030
if (auto* conditional = std::get_if<GpuGraphConditionalNodeParams>(¶ms)) {
VLOG(2) << "Add conditional node to a graph " << graph
<< "; type: " << ConditionalTypeToString(conditional->type)
<< "; deps: " << deps.size();
CUgraphNodeParams cu_params;
memset(&cu_params, 0, sizeof(cu_params));
GpuContext* gpu_context =
tensorflow::down_cast<GpuContext*>(conditional->context);
cu_params.type = CU_GRAPH_NODE_TYPE_CONDITIONAL;
cu_params.conditional.handle = conditional->handle;
cu_params.conditional.ctx = gpu_context->context();
cu_params.conditional.size = 1;
switch (conditional->type) {
case GpuDriver::GpuGraphConditionalNodeParams::Type::kIf:
cu_params.conditional.type = CU_GRAPH_COND_TYPE_IF;
break;
case GpuDriver::GpuGraphConditionalNodeParams::Type::kWhile:
cu_params.conditional.type = CU_GRAPH_COND_TYPE_WHILE;
break;
}
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphAddNode(node, graph, deps.data(), deps.size(), &cu_params),
"Failed to add conditional node to a CUDA graph"));
GpuGraphConditionalNodeParams::Result result;
result.graph = cu_params.conditional.phGraph_out[0];
VLOG(2) << "Created conditional CUDA graph " << result.graph;
return result;
}
#endif
return absl::UnimplementedError("unsupported node type");
}
absl::Status GpuDriver::GraphAddEmptyNode(CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps) {
VLOG(2) << "Add empty node to a graph " << graph << "; deps: " << deps.size();
return cuda::ToStatus(
cuGraphAddEmptyNode(node, graph, deps.data(), deps.size()),
"Failed to add empty node to a CUDA graph");
}
absl::Status GpuDriver::GraphAddKernelNode(
CUgraphNode* node, CUgraph graph, absl::Span<const CUgraphNode> deps,
absl::string_view kernel_name, CUfunction function, unsigned int grid_dim_x,
unsigned int grid_dim_y, unsigned int grid_dim_z, unsigned int block_dim_x,
unsigned int block_dim_y, unsigned int block_dim_z,
unsigned int shared_mem_bytes, void** kernel_params, void** extra) {
VLOG(2) << "Add kernel node to a graph " << graph
<< "; kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z << "; shmem: " << shared_mem_bytes
<< "; deps: " << deps.size();
CUDA_KERNEL_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
params.func = function;
params.gridDimX = grid_dim_x;
params.gridDimY = grid_dim_y;
params.gridDimZ = grid_dim_z;
params.blockDimX = block_dim_x;
params.blockDimY = block_dim_y;
params.blockDimZ = block_dim_z;
params.sharedMemBytes = shared_mem_bytes;
params.kernelParams = kernel_params;
params.extra = extra;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(
cuGraphAddKernelNode(node, graph, deps.data(), deps.size(), ¶ms),
"Failed to add kernel node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecKernelNodeSetParams(
CUgraphExec exec, CUgraphNode node, absl::string_view kernel_name,
CUfunction function, unsigned int grid_dim_x, unsigned int grid_dim_y,
unsigned int grid_dim_z, unsigned int block_dim_x, unsigned int block_dim_y,
unsigned int block_dim_z, unsigned int shared_mem_bytes,
void** kernel_params, void** extra) {
VLOG(2) << "Set kernel node params " << node << " in graph executable "
<< exec << "; kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z << "; shmem: " << shared_mem_bytes;
CUDA_KERNEL_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
params.func = function;
params.gridDimX = grid_dim_x;
params.gridDimY = grid_dim_y;
params.gridDimZ = grid_dim_z;
params.blockDimX = block_dim_x;
params.blockDimY = block_dim_y;
params.blockDimZ = block_dim_z;
params.sharedMemBytes = shared_mem_bytes;
params.kernelParams = kernel_params;
params.extra = extra;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(cuGraphExecKernelNodeSetParams(exec, node, ¶ms),
"Failed to set CUDA graph kernel node params");
}
absl::Status GpuDriver::GraphAddMemcpyD2DNode(
Context* context, CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps, CUdeviceptr gpu_dst,
CUdeviceptr gpu_src, uint64_t size) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Add memcpy d2d node to a graph " << graph
<< "; dst: " << reinterpret_cast<void*>(gpu_dst)
<< "; src: " << reinterpret_cast<void*>(gpu_src) << "; size: " << size
<< "; context: " << gpu_context->context()
<< "; deps: " << deps.size();
CUDA_MEMCPY3D params;
memset(¶ms, 0, sizeof(params));
params.srcMemoryType = CU_MEMORYTYPE_DEVICE;
params.srcDevice = gpu_src;
params.dstMemoryType = CU_MEMORYTYPE_DEVICE;
params.dstDevice = gpu_dst;
params.WidthInBytes = size;
params.Height = 1;
params.Depth = 1;
return cuda::ToStatus(
cuGraphAddMemcpyNode(node, graph, deps.data(), deps.size(), ¶ms,
gpu_context->context()),
"Failed to add memcpy d2d node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecMemcpyD2DNodeSetParams(
Context* context, GpuGraphExecHandle exec, GpuGraphNodeHandle node,
GpuDevicePtr gpu_dst, GpuDevicePtr gpu_src, uint64_t size) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Set memcpy d2d node params " << node << " in graph executable "
<< exec << "; dst: " << reinterpret_cast<void*>(gpu_dst)
<< "; src: " << reinterpret_cast<void*>(gpu_src) << "; size: " << size
<< "; context: " << gpu_context->context();
CUDA_MEMCPY3D params;
memset(¶ms, 0, sizeof(params));
params.srcMemoryType = CU_MEMORYTYPE_DEVICE;
params.srcDevice = gpu_src;
params.dstMemoryType = CU_MEMORYTYPE_DEVICE;
params.dstDevice = gpu_dst;
params.WidthInBytes = size;
params.Height = 1;
params.Depth = 1;
return cuda::ToStatus(cuGraphExecMemcpyNodeSetParams(exec, node, ¶ms,
gpu_context->context()),
"Failed to set memcpy d2d node params");
}
namespace {
struct BitPatternToString {
std::string operator()(uint8_t pattern) {
return absl::StrCat("u8:", pattern);
}
std::string operator()(uint16_t pattern) {
return absl::StrCat("u16:", pattern);
}
std::string operator()(uint32_t pattern) {
return absl::StrCat("u32:", pattern);
}
};
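// Expands an 8-, 16- or 32-bit fill pattern into the 32-bit value and element
// size expected by CUDA_MEMSET_NODE_PARAMS.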
struct BitPatternToValue {
std::pair<unsigned, unsigned> operator()(uint8_t pattern) {
unsigned value = pattern;
return {(value << 24) | (value << 16) | (value << 8) | value,
1};
}
std::pair<unsigned, unsigned> operator()(uint16_t pattern) {
unsigned value = pattern;
return {(value << 16) | value, 2};
}
std::pair<unsigned, unsigned> operator()(uint32_t pattern) {
return {pattern, 4};
}
};
}
absl::Status GpuDriver::GraphAddMemsetNode(
Context* context, CUgraphNode* node, GpuGraphHandle graph,
absl::Span<const CUgraphNode> deps, CUdeviceptr dst,
std::variant<uint8_t, uint16_t, uint32_t> bit_pattern,
uint64_t num_elements) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Add memset node to a graph " << graph
<< "; dst: " << reinterpret_cast<void*>(dst)
<< "; bit_pattern: " << std::visit(BitPatternToString(), bit_pattern)
<< "; num_elements: " << num_elements
<< "; context: " << gpu_context->context()
<< "; deps: " << deps.size();
CUDA_MEMSET_NODE_PARAMS params;
memset(&params, 0, sizeof(params));
auto [value, element_size] = std::visit(BitPatternToValue(), bit_pattern);
params.dst = dst;
params.elementSize = element_size;
params.height = 1;
params.pitch = 0;
params.value = value;
params.width = num_elements;
return cuda::ToStatus(
cuGraphAddMemsetNode(node, graph, deps.data(), deps.size(), &params,
gpu_context->context()),
"Failed to add memset node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecMemsetNodeSetParams(
Context* context, CUgraphExec exec, CUgraphNode node, CUdeviceptr dst,
std::variant<uint8_t, uint16_t, uint32_t> bit_pattern,
uint64_t num_elements) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Set memset node params " << node << " in graph executable "
<< exec << "; dst: " << reinterpret_cast<void*>(dst)
<< "; bit_pattern: " << std::visit(BitPatternToString(), bit_pattern)
<< "; num_elements: " << num_elements
<< "; context: " << gpu_context->context();
CUDA_MEMSET_NODE_PARAMS params;
memset(&params, 0, sizeof(params));
auto [value, element_size] = std::visit(BitPatternToValue(), bit_pattern);
params.dst = dst;
params.elementSize = element_size;
params.height = 1;
params.pitch = 0;
params.value = value;
params.width = num_elements;
return cuda::ToStatus(cuGraphExecMemsetNodeSetParams(exec, node, &params,
gpu_context->context()),
"Failed to set memset node params");
}
absl::Status GpuDriver::GraphAddChildNode(CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps,
CUgraph child) {
VLOG(2) << "Create a new node by cloning the child graph " << child
<< " and add it to " << graph << "; deps: " << deps.size();
return cuda::ToStatus(
cuGraphAddChildGraphNode(node, graph, deps.data(), deps.size(), child),
"Failed to create a child graph node and add it to a CUDA graph");
}
absl::Status GpuDriver::GraphExecChildNodeSetParams(CUgraphExec exec,
CUgraphNode node,
CUgraph child) {
VLOG(2) << "Set child node params " << node << " in graph executable " << exec
<< "to params contained in " << child;
return cuda::ToStatus(cuGraphExecChildGraphNodeSetParams(exec, node, child),
"Failed to set CUDA graph child node params");
}
absl::Status GpuDriver::LaunchKernel(
Context* context, absl::string_view kernel_name, CUfunction function,
unsigned int grid_dim_x, unsigned int grid_dim_y, unsigned int grid_dim_z,
unsigned int block_dim_x, unsigned int block_dim_y,
unsigned int block_dim_z, unsigned int shared_mem_bytes, CUstream stream,
void** kernel_params, void** extra) {
ScopedActivateContext activation(context);
VLOG(2) << "launching kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z
<< "; shared_mem_bytes: " << shared_mem_bytes;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(
cuLaunchKernel(function, grid_dim_x, grid_dim_y, grid_dim_z, block_dim_x,
block_dim_y, block_dim_z, shared_mem_bytes, stream,
kernel_params, extra),
absl::StrCat("Failed to launch CUDA kernel: ", kernel_name,
"; block dims: ", block_dim_x, "x", block_dim_y, "x",
block_dim_z, "; grid dims: ", grid_dim_x, "x", grid_dim_y,
"x", grid_dim_z,
"; shared memory size: ", shared_mem_bytes));
}
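// Overload for launches that use thread block clusters: the cluster shape is
// attached as a CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION launch attribute and the
// kernel is started through cuLaunchKernelEx.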
absl::Status GpuDriver::LaunchKernel(
Context* context, absl::string_view kernel_name, GpuFunctionHandle function,
unsigned int cluster_dim_x, unsigned int cluster_dim_y,
unsigned int cluster_dim_z, unsigned int grid_dim_x,
unsigned int grid_dim_y, unsigned int grid_dim_z, unsigned int block_dim_x,
unsigned int block_dim_y, unsigned int block_dim_z,
unsigned int shared_mem_bytes, GpuStreamHandle stream, void** kernel_params,
void** extra) {
ScopedActivateContext activation(context);
VLOG(2) << "launching kernel: " << kernel_name << "; cdx: " << cluster_dim_x
<< " cdy: " << cluster_dim_y << " cdz: " << cluster_dim_z
<< " gdx: " << grid_dim_x << " gdy: " << grid_dim_y
<< " gdz: " << grid_dim_z << " bdx: " << block_dim_x
<< " bdy: " << block_dim_y << " bdz: " << block_dim_z
<< "; shared_mem_bytes: " << shared_mem_bytes;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
CUlaunchConfig launch_config;
memset(&launch_config, 0, sizeof(launch_config));
launch_config.blockDimX = block_dim_x;
launch_config.blockDimY = block_dim_y;
launch_config.blockDimZ = block_dim_z;
launch_config.gridDimX = grid_dim_x;
launch_config.gridDimY = grid_dim_y;
launch_config.gridDimZ = grid_dim_z;
launch_config.hStream = stream;
launch_config.sharedMemBytes = shared_mem_bytes;
CUlaunchAttribute cluster_dims;
memset(&cluster_dims, 0, sizeof(cluster_dims));
cluster_dims.id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
cluster_dims.value.clusterDim.x = cluster_dim_x;
cluster_dims.value.clusterDim.y = cluster_dim_y;
cluster_dims.value.clusterDim.z = cluster_dim_z;
launch_config.attrs = &cluster_dims;
launch_config.numAttrs = 1;
return cuda::ToStatus(
cuLaunchKernelEx(&launch_config, function, kernel_params, extra),
absl::StrCat("Failed to launch CUDA kernel: ", kernel_name,
"; cluster dims: ", cluster_dim_x, "x", cluster_dim_y, "x",
cluster_dim_z, "; block dims: ", block_dim_x, "x",
block_dim_y, "x", block_dim_z, "; grid dims: ", grid_dim_x,
"x", grid_dim_y, "x", grid_dim_z,
"; shared memory size: ", shared_mem_bytes));
}
absl::Status GpuDriver::LoadCubin(Context* context, const char* cubin_bytes,
CUmodule* module) {
ScopedActivateContext activation(context);
return cuda::ToStatus(
cuModuleLoadFatBinary(module, cubin_bytes),
"Failed to load in-memory CUBIN (compiled for a different GPU?).");
}
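// JIT-compiles PTX into a module on the GetDriverExecutor() thread and blocks
// on a notification until it completes; JIT info/error logs are captured into
// fixed-size buffers, and register-allocation failures are reported as
// RESOURCE_EXHAUSTED.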
absl::Status GpuDriver::LoadPtx(Context* context, const char* ptx_contents,
CUmodule* module) {
absl::Notification notification;
absl::Status ret = absl::OkStatus();
GetDriverExecutor()->Schedule(
[context, ptx_contents, module, &ret, ¬ification]() {
ScopedActivateContext activation(context);
void* ptx_data = const_cast<char*>(ptx_contents);
static const unsigned int kLogBufferBytesLimit = 1024;
unsigned int error_log_buffer_bytes = kLogBufferBytesLimit;
unsigned int info_log_buffer_bytes = kLogBufferBytesLimit;
absl::InlinedVector<char, 4> error_log_buffer(error_log_buffer_bytes);
absl::InlinedVector<char, 4> info_log_buffer(info_log_buffer_bytes);
bool log_verbose = true;
CUjit_option options[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
CU_JIT_ERROR_LOG_BUFFER,
CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,
CU_JIT_INFO_LOG_BUFFER, CU_JIT_LOG_VERBOSE};
void* option_values[] = {
absl::bit_cast<void*>(uintptr_t(error_log_buffer_bytes)),
absl::bit_cast<void*>(error_log_buffer.data()),
absl::bit_cast<void*>(uintptr_t(info_log_buffer_bytes)),
absl::bit_cast<void*>(info_log_buffer.data()),
absl::bit_cast<void*>(uintptr_t(log_verbose))};
CHECK(TF_ARRAYSIZE(options) == TF_ARRAYSIZE(option_values));
absl::Status status;
{
absl::LeakCheckDisabler disabler;
status = cuda::ToStatus(cuModuleLoadDataEx(
module, ptx_data, TF_ARRAYSIZE(options), options, option_values));
}
error_log_buffer_bytes = reinterpret_cast<uintptr_t>(option_values[0]);
info_log_buffer_bytes = reinterpret_cast<uintptr_t>(option_values[2]);
CHECK_LE(error_log_buffer_bytes, kLogBufferBytesLimit);
CHECK_LE(info_log_buffer_bytes, kLogBufferBytesLimit);
if (!status.ok()) {
LOG(ERROR) << "failed to load PTX text as a module: " << status;
error_log_buffer[error_log_buffer_bytes ? error_log_buffer_bytes - 1
: 0] = '\0';
LOG(ERROR) << "error log buffer (" << error_log_buffer_bytes
<< " bytes): " << error_log_buffer.data();
if (absl::StrContains(error_log_buffer.data(),
"Register allocation failed")) {
ret = absl::ResourceExhaustedError(
absl::StrFormat("Failed to load PTX text as a module (register "
"allocation failed): %s",
status.ToString()));
} else {
ret = status;
}
notification.Notify();
return;
}
VLOG(3) << "PTX compilation info log (" << info_log_buffer_bytes
<< " bytes): " << info_log_buffer.data();
VLOG(3) << "PTX compilation error log (" << error_log_buffer_bytes
<< " bytes): " << error_log_buffer.data();
CHECK(module != nullptr);
notification.Notify();
});
notification.WaitForNotification();
return ret;
}
absl::Status GpuDriver::LoadHsaco(Context* context, const char* hsaco_contents,
CUmodule* module) {
return absl::InternalError(
"Feature not supported on CUDA platform (LoadHsaco)");
}
absl::Status GpuDriver::SynchronousMemsetUint8(Context* context,
CUdeviceptr location,
uint8_t value, size_t size) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD8(location, value, size),
"Failed to memset memory");
}
absl::Status GpuDriver::SynchronousMemsetUint32(Context* context,
CUdeviceptr location,
uint32_t value,
size_t uint32_count) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD32(location, value, uint32_count),
"Failed to memset memory");
}
absl::Status GpuDriver::AsynchronousMemsetUint8(Context* context,
CUdeviceptr location,
uint8_t value,
size_t uint8_count,
CUstream stream) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD8Async(location, value, uint8_count, stream),
"Failed to enqueue async memset operation");
}
absl::Status GpuDriver::AsynchronousMemsetUint32(Context* context,
CUdeviceptr location,
uint32_t value,
size_t uint32_count,
CUstream stream) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD32Async(location, value, uint32_count, stream),
"Failed to enqueue async memset operation");
}
absl::Status GpuDriver::AddStreamCallback(Context* context, CUstream stream,
StreamCallback callback, void* data) {
return cuda::ToStatus(cuLaunchHostFunc(stream, callback, data));
}
absl::Status GpuDriver::GetModuleFunction(Context* context, CUmodule module,
const char* kernel_name,
CUfunction* function) {
ScopedActivateContext activated{context};
CHECK(module != nullptr && kernel_name != nullptr);
cudaError_t cuda_error = cudaPeekAtLastError();
if (cuda_error != cudaSuccess) {
return absl::InternalError(
absl::StrCat("There was an error before calling cuModuleGetFunction (",
cuda_error, "): ", cudaGetErrorName(cuda_error), " : ",
cudaGetErrorString(cuda_error)));
}
return cuda::ToStatus(cuModuleGetFunction(function, module, kernel_name),
"Failed to get module function");
}
absl::Status GpuDriver::GetModuleSymbol(Context* context, CUmodule module,
const char* symbol_name,
CUdeviceptr* dptr, size_t* bytes) {
ScopedActivateContext activated{context};
CHECK(module != nullptr && symbol_name != nullptr &&
(dptr != nullptr || bytes != nullptr));
return cuda::ToStatus(
cuModuleGetGlobal(dptr, bytes, module, symbol_name),
absl::StrCat("Failed to get symbol '", symbol_name, "'"));
}
void GpuDriver::UnloadModule(Context* context, CUmodule module) {
ScopedActivateContext activated{context};
auto status = cuda::ToStatus(cuModuleUnload(module));
if (!status.ok()) {
LOG(ERROR) << "failed to unload module " << module
<< "; leaking: " << status;
}
}
absl::StatusOr<GpuStreamHandle> GpuDriver::CreateStream(Context* context,
int priority) {
ScopedActivateContext activated(context);
GpuStreamHandle stream;
if (priority == 0) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING)));
} else {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, priority)));
}
VLOG(2) << "successfully created stream " << stream << " for context "
<< context << " on thread";
return stream;
}
void GpuDriver::DestroyStream(Context* context, GpuStreamHandle stream) {
if (stream == nullptr) {
return;
}
ScopedActivateContext activated{context};
CUresult res = cuStreamQuery(stream);
if (res != CUDA_SUCCESS) {
LOG(ERROR) << "stream not idle on destroy: " << cuda::ToStatus(res);
}
auto status = cuda::ToStatus(cuStreamDestroy(stream));
if (!status.ok()) {
LOG(ERROR) << "failed to destroy CUDA stream for context " << context
<< ": " << status;
} else {
VLOG(2) << "successfully destroyed stream " << stream << " for context "
<< context;
}
}
void* GpuDriver::DeviceAllocate(Context* context, uint64_t bytes) {
if (bytes == 0) {
return nullptr;
}
ScopedActivateContext activated{context};
CUdeviceptr result = 0;
auto status = cuda::ToStatus(cuMemAlloc(&result, bytes));
if (!status.ok()) {
LOG(INFO) << "failed to allocate "
<< tsl::strings::HumanReadableNumBytes(bytes) << " (" << bytes
<< " bytes) from device: " << status;
return nullptr;
}
void* ptr = reinterpret_cast<void*>(result);
VLOG(2) << "allocated " << ptr << " for context " << context << " of "
<< bytes << " bytes";
return ptr;
}
void GpuDriver::DeviceDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
CUdeviceptr pointer = absl::bit_cast<CUdeviceptr>(location);
auto status = cuda::ToStatus(cuMemFree(pointer));
if (!status.ok()) {
LOG(ERROR) << "failed to free device memory at " << location
<< "; result: " << status;
} else {
VLOG(2) << "deallocated " << location << " for context " << context;
}
}
void* GpuDriver::UnifiedMemoryAllocate(Context* context, uint64_t bytes) {
ScopedActivateContext activation(context);
CUdeviceptr result = 0;
auto status =
cuda::ToStatus(cuMemAllocManaged(&result, bytes, CU_MEM_ATTACH_GLOBAL));
if (!status.ok()) {
LOG(ERROR) << "failed to alloc " << bytes
<< " bytes unified memory; result: " << status;
return nullptr;
}
void* ptr = reinterpret_cast<void*>(result);
VLOG(2) << "allocated " << ptr << " for context " << context << " of "
<< bytes << " bytes in unified memory";
return ptr;
}
void GpuDriver::UnifiedMemoryDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
CUdeviceptr pointer = absl::bit_cast<CUdeviceptr>(location);
auto status = cuda::ToStatus(cuMemFree(pointer));
if (!status.ok()) {
LOG(ERROR) << "failed to free unified memory at " << location
<< "; result: " << status;
} else {
VLOG(2) << "deallocated unified memory at " << location << " for context "
<< context;
}
}
void* GpuDriver::HostAllocate(Context* context, uint64_t bytes) {
ScopedActivateContext activation(context);
void* host_mem = nullptr;
auto status = cuda::ToStatus(
cuMemHostAlloc(&host_mem, bytes, CU_MEMHOSTALLOC_PORTABLE));
if (!status.ok()) {
LOG(ERROR) << "failed to alloc " << bytes << " bytes on host: " << status;
}
return host_mem;
}
void GpuDriver::HostDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(cuMemFreeHost(location));
if (!status.ok()) {
LOG(ERROR) << "error deallocating host memory at " << location << ": "
<< status;
}
}
bool GpuDriver::HostRegister(Context* context, void* location, uint64_t bytes) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(
cuMemHostRegister(location, bytes, CU_MEMHOSTREGISTER_PORTABLE));
if (!status.ok()) {
LOG(ERROR) << "error registering host memory at " << location << ": "
<< status;
return false;
}
return true;
}
bool GpuDriver::HostUnregister(Context* context, void* location) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(cuMemHostUnregister(location));
if (!status.ok()) {
LOG(ERROR) << "error unregistering host memory at " << location << ": "
<< status;
return false;
}
return true;
}
int GpuDriver::GetGpuStreamPriority(
Context* context, stream_executor::StreamPriority stream_priority) {
ScopedActivateContext activation(context);
if (stream_priority == stream_executor::StreamPriority::Default) {
return 0;
}
int lowest, highest;
auto status = cuda::ToStatus(cuCtxGetStreamPriorityRange(&lowest, &highest));
if (!status.ok()) {
LOG(ERROR)
<< "Could not query stream priority range. Returning default priority.";
return 0;
}
return stream_priority == stream_executor::StreamPriority::Highest ? highest
: lowest;
}
absl::Status GpuDriver::DestroyEvent(Context* context, CUevent* event) {
if (*event == nullptr) {
return absl::InvalidArgumentError("input event cannot be null");
}
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventDestroy(*event), "Error destroying CUDA event");
}
absl::Status GpuDriver::RecordEvent(Context* context, CUevent event,
CUstream stream) {
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventRecord(event, stream),
"Error recording CUDA event");
}
absl::StatusOr<float> GpuDriver::GetEventElapsedTime(Context* context,
CUevent start,
CUevent stop) {
ScopedActivateContext activated{context};
auto status = cuda::ToStatus(cuEventSynchronize(stop));
if (!status.ok()) {
LOG(ERROR) << "failed to synchronize the stop event: " << status;
return status;
}
float elapsed_milliseconds;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuEventElapsedTime(&elapsed_milliseconds, start, stop)));
return elapsed_milliseconds;
}
absl::Status GpuDriver::WaitStreamOnEvent(Context* context, CUstream stream,
CUevent event) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuStreamWaitEvent(stream, event, 0));
}
absl::Status GpuDriver::SynchronizeContext(Context* context) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuCtxSynchronize());
}
absl::Status GpuDriver::SynchronizeStream(Context* context, CUstream stream) {
ScopedActivateContext activated{context};
CHECK(stream != nullptr);
return cuda::ToStatus(cuStreamSynchronize(stream),
"Could not synchronize CUDA stream");
}
absl::Status GpuDriver::SynchronousMemcpyD2H(Context* context, void* host_dst,
CUdeviceptr gpu_src,
uint64_t size) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuMemcpyDtoH(host_dst, gpu_src, size),
absl::StrFormat("failed to synchronous memcpy from device to host "
"host dst: %p; GPU src: %p; size: %u=0x%x",
host_dst, absl::bit_cast<void*>(gpu_src), size, size)));
VLOG(2) << "successfully sync memcpy'd d2h of " << size << " bytes to "
<< host_dst;
return absl::OkStatus();
}
absl::Status GpuDriver::SynchronousMemcpyH2D(Context* context,
CUdeviceptr gpu_dst,
const void* host_src,
uint64_t size) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuMemcpyHtoD(gpu_dst, host_src, size),
absl::StrFormat(
"failed to synchronous memcpy from host to device: GPU dst: %p;"
" host src: %p; size: %u=0x%x",
absl::bit_cast<void*>(gpu_dst), host_src, size, size)));
VLOG(2) << "successfully enqueued sync memcpy h2d of " << size << " bytes";
return absl::OkStatus();
}
absl::Status GpuDriver::AsynchronousMemcpyD2H(Context* context, void* host_dst,
CUdeviceptr gpu_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoHAsync(host_dst, gpu_src, size, stream)));
VLOG(2) << "successfully enqueued async memcpy d2h of " << size
<< " bytes from " << absl::bit_cast<void*>(gpu_src) << " to "
<< host_dst << " on stream " << stream;
return absl::OkStatus();
}
absl::Status GpuDriver::AsynchronousMemcpyH2D(Context* context,
CUdeviceptr gpu_dst,
const void* host_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyHtoDAsync(gpu_dst, host_src, size, stream)));
VLOG(2) << "successfully enqueued async memcpy h2d of " << size << " bytes"
<< " from " << host_src << " to " << absl::bit_cast<void*>(gpu_dst)
<< " on stream " << stream;
return absl::OkStatus();
}
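// Uses cuMemcpyDtoDAsync when the stream is capturing, when either pointer is
// null, or when both pointers belong to the same context; otherwise the copy
// is routed through cuMemcpyPeerAsync between the two owning contexts.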
absl::Status GpuDriver::AsynchronousMemcpyD2D(Context* context,
CUdeviceptr gpu_dst,
CUdeviceptr gpu_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_ASSIGN_OR_RETURN(bool is_capturing, StreamIsCapturing(stream));
if ((gpu_dst == 0 || gpu_src == 0) || is_capturing) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoDAsync(gpu_dst, gpu_src, size, stream)));
} else {
CUcontext dst_context =
GetContextMap()->GetAnyContext(absl::bit_cast<void*>(gpu_dst));
CUcontext src_context =
GetContextMap()->GetAnyContext(absl::bit_cast<void*>(gpu_src));
if (dst_context == src_context) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoDAsync(gpu_dst, gpu_src, size, stream)));
} else {
TF_RETURN_IF_ERROR(cuda::ToStatus(cuMemcpyPeerAsync(
gpu_dst, dst_context, gpu_src, src_context, size, stream)));
}
}
VLOG(2) << "successfully enqueued async memcpy d2d of " << size << " bytes"
<< " from " << absl::bit_cast<void*>(gpu_src) << " to "
<< absl::bit_cast<void*>(gpu_dst) << " on stream " << stream;
return absl::OkStatus();
}
absl::Status GpuDriver::InitEvent(Context* context, CUevent* result,
EventFlags flags) {
int cuflags;
switch (flags) {
case EventFlags::kDefault:
cuflags = CU_EVENT_DEFAULT;
break;
case EventFlags::kDisableTiming:
cuflags = CU_EVENT_DISABLE_TIMING;
break;
default:
LOG(FATAL) << "impossible event flags: " << int(flags);
}
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventCreate(result, cuflags));
}
int GpuDriver::GetDeviceCount() {
int device_count = 0;
auto status = cuda::ToStatus(cuDeviceGetCount(&device_count));
if (!status.ok()) {
LOG(ERROR) << "could not retrieve CUDA device count: " << status;
return 0;
}
return device_count;
}
absl::StatusOr<MemoryType> GpuDriver::GetPointerMemorySpace(
CUdeviceptr pointer) {
unsigned int value;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuPointerGetAttribute(
&value, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, pointer)));
switch (value) {
case CU_MEMORYTYPE_DEVICE:
return MemoryType::kDevice;
case CU_MEMORYTYPE_HOST:
return MemoryType::kHost;
default:
return absl::InternalError(
absl::StrCat("unknown memory space provided by CUDA API: ", value));
}
}
absl::Status GpuDriver::GetPointerAddressRange(CUdeviceptr dptr,
CUdeviceptr* base,
size_t* size) {
return cuda::ToStatus(cuMemGetAddressRange(base, size, dptr));
}
absl::Status GpuDriver::GetComputeCapability(int* cc_major, int* cc_minor,
CUdevice device) {
*cc_major = 0;
*cc_minor = 0;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuDeviceGetAttribute(
cc_major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device)));
return cuda::ToStatus(cuDeviceGetAttribute(
cc_minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device));
}
absl::Status GpuDriver::GetGpuISAVersion(int* version, CUdevice device) {
return absl::Status{
absl::StatusCode::kInternal,
"Feature not supported on CUDA platform (GetGpuISAVersion)"};
}
absl::Status GpuDriver::GetGpuGCNArchName(CUdevice, std::string*) {
return absl::Status{
absl::StatusCode::kInternal,
"Feature not supported on CUDA platform (GetGpuGCNArchName)"};
}
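// Queries a single integer-valued CUdevice_attribute and converts the result
// to the requested return type; shared by the attribute getters below.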
template <typename T>
static absl::StatusOr<T> GetSimpleAttribute(CUdevice device,
CUdevice_attribute attribute) {
int value = -1;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, attribute, device),
absl::StrCat("Could not retrieve CUDA device attribute (", attribute)));
T converted = value;
return converted;
}
absl::StatusOr<int> GpuDriver::GetMultiprocessorCount(CUdevice device) {
return GetSimpleAttribute<int>(device,
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerCore(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerBlock(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerBlockOptin(
CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxThreadsPerMultiprocessor(
CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxRegistersPerBlock(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK);
}
absl::StatusOr<int64_t> GpuDriver::GetThreadsPerWarp(CUdevice device) {
return GetSimpleAttribute<int64_t>(device, CU_DEVICE_ATTRIBUTE_WARP_SIZE);
}
absl::Status GpuDriver::GetGridLimits(int* x, int* y, int* z, CUdevice device) {
int value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X, device),
"Could not get device attribute"));
*x = value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y, device),
"Could not get device attribute"));
*y = value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, device),
"Could not get device attribute"));
*z = value;
return absl::OkStatus();
}
absl::StatusOr<int32_t> GpuDriver::GetDriverVersion() {
int32_t version;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuDriverGetVersion(&version),
"Could not get driver version"));
return version;
}
bool GpuDriver::GetDeviceProperties(CUdevprop* device_properties,
int device_ordinal) {
auto status =
cuda::ToStatus(cuDeviceGetProperties(device_properties, device_ordinal));
return status.ok();
}
absl::StatusOr<int> GpuDriver::GetDeviceAttribute(CUdevice_attribute attribute,
CUdevice device) {
int val;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDeviceGetAttribute(&val, attribute, device)));
return val;
}
bool GpuDriver::IsEccEnabled(CUdevice device, bool* result) {
int value = -1;
auto status = cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_ECC_ENABLED, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query ECC status: " << status;
return false;
}
*result = value;
return true;
}
bool GpuDriver::GetDeviceMemoryInfo(Context* context, int64_t* free_out,
int64_t* total_out) {
ScopedActivateContext activation(context);
size_t free = 0;
size_t total = 0;
auto status = cuda::ToStatus(cuMemGetInfo(&free, &total));
if (!status.ok()) {
LOG(ERROR) << "failed to query device memory info: " << status;
return false;
}
*free_out = free;
*total_out = total;
return true;
}
bool GpuDriver::GetDeviceTotalMemory(CUdevice device, uint64_t* result) {
size_t value{};
auto status = cuda::ToStatus(cuDeviceTotalMem(&value, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query total available memory: " << status;
return false;
}
*result = value;
return true;
}
std::string GpuDriver::GetPCIBusID(CUdevice device) {
std::string pci_bus_id;
static const int kBufferSize = 64;
absl::InlinedVector<char, 4> chars(kBufferSize);
chars[kBufferSize - 1] = '\0';
auto status = cuda::ToStatus(
cuDeviceGetPCIBusId(chars.begin(), kBufferSize - 1, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query PCI bus id for device: " << status;
return pci_bus_id;
}
pci_bus_id = chars.begin();
return pci_bus_id;
}
bool GpuDriver::CanEnablePeerAccess(Context* from, Context* to) {
if (from == to) {
return true;
}
auto from_device = DeviceFromContext(from);
if (!from_device.ok()) {
LOG(ERROR) << "failed to resolve 'from' peer access context to a device: "
<< from_device.status();
return false;
}
auto to_device = DeviceFromContext(to);
if (!to_device.ok()) {
LOG(ERROR) << "failed to resolve 'to' peer access context to a device: "
<< to_device.status();
return false;
}
return CanEnablePeerAccess(from_device.value(), to_device.value());
}
bool GpuDriver::CanEnablePeerAccess(GpuDeviceHandle from, GpuDeviceHandle to) {
int can_access_peer = -1;
auto status =
cuda::ToStatus(cuDeviceCanAccessPeer(&can_access_peer, from, to));
if (!status.ok()) {
LOG(ERROR) << "failed to detect peer access capability: " << status;
return false;
}
return can_access_peer;
}
absl::Status GpuDriver::EnablePeerAccess(Context* from, Context* to) {
if (from == to) {
return absl::OkStatus();
}
ScopedActivateContext activated{from};
CUresult result = cuCtxEnablePeerAccess(
tensorflow::down_cast<GpuContext*>(to)->context(), 0);
if (result != CUDA_SUCCESS &&
result != CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED) {
return absl::InternalError(
absl::StrFormat("failed to enable peer access from %p to %p: %s", from,
to, cuda::ToStatus(result).ToString()));
}
return absl::OkStatus();
}
absl::StatusOr<int> GpuDriver::GetMaxOccupiedBlocksPerCore(
Context* context, CUfunction kernel, int threads_per_block,
size_t dynamic_shared_memory_bytes) {
ScopedActivateContext activation(context);
int max_blocks;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
&max_blocks, kernel, threads_per_block, dynamic_shared_memory_bytes,
CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE),
absl::StrFormat("Failed to calculate occupancy of kernel %p", kernel)));
return max_blocks;
}
absl::StatusOr<size_t> GpuDriver::GraphGetNodeCount(GpuGraphHandle graph) {
size_t num_nodes;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuGraphGetNodes(graph, nullptr, &num_nodes)));
return num_nodes;
}
}
} | #include "xla/stream_executor/cuda/cuda_driver.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/cleanup/cleanup.h"
#include "absl/log/log.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/cuda/cuda_diagnostics.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
using ::tsl::testing::IsOkAndHolds;
namespace stream_executor {
namespace cuda {
void CheckCuda(CUresult result, const char* file, int line) {
TF_CHECK_OK(cuda::ToStatus(result));
}
void CheckCuda(cudaError_t result, const char* file, int line) {
if (result == cudaSuccess) {
return;
}
const char* name = cudaGetErrorName(result);
const char* message = cudaGetErrorString(result);
LOG(FATAL) << file << "(" << line << "): " << name << ", " << message;
}
#define CHECK_CUDA(result) CheckCuda(result, __FILE__, __LINE__)
class CudaDriverTest : public ::testing::Test {
protected:
static void SetUpTestSuite() { CHECK_CUDA(cuInit(0)); }
};
TEST_F(CudaDriverTest, ScopedActivateContextTest) {
CUdevice device;
CHECK_CUDA(cuDeviceGet(&device, 0));
CUcontext context0, context1;
CHECK_CUDA(cuCtxCreate(&context0, 0, device));
CHECK_CUDA(cuCtxCreate(&context1, 0, device));
gpu::GpuContext se_context1(context1, 101);
{
gpu::ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
CHECK_CUDA(cuCtxSetCurrent(context0));
{
gpu::ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
}
TEST_F(CudaDriverTest, DriverVersionParsingTest) {
auto driver_version = Diagnostician::FindKernelModuleVersion(
"... NVIDIA UNIX Open Kernel Module for x86_64 570.00 Release Build "
"... Mon Aug 12 04:17:20 UTC 2024");
TF_CHECK_OK(driver_version.status());
EXPECT_EQ("570.0.0", cuda::DriverVersionToString(driver_version.value()));
driver_version = Diagnostician::FindKernelModuleVersion(
"... NVIDIA UNIX Open Kernel Module 571.00 Release Build "
"... Mon Aug 12 04:17:20 UTC 2024");
TF_CHECK_OK(driver_version.status());
EXPECT_EQ("571.0.0", cuda::DriverVersionToString(driver_version.value()));
}
TEST_F(CudaDriverTest, GraphGetNodeCountTest) {
CUdevice device;
CHECK_CUDA(cuDeviceGet(&device, 0));
CUcontext context;
CHECK_CUDA(cuCtxCreate(&context, 0, device));
gpu::GpuGraphHandle graph;
TF_CHECK_OK(gpu::GpuDriver::CreateGraph(&graph));
absl::Cleanup cleanup(
[graph] { TF_CHECK_OK(gpu::GpuDriver::DestroyGraph(graph)); });
EXPECT_THAT(gpu::GpuDriver::GraphGetNodeCount(graph), IsOkAndHolds(0));
gpu::GpuGraphNodeHandle node;
TF_CHECK_OK(gpu::GpuDriver::GraphAddEmptyNode(&node, graph, {}));
EXPECT_THAT(gpu::GpuDriver::GraphGetNodeCount(graph), IsOkAndHolds(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
573867bc-7633-461e-a4f3-a42efd5de27b | cpp | tensorflow/tensorflow | tf_allocator_adapter | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter.cc | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter_test.cc | #include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
namespace stream_executor {
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped, Stream *stream)
: DeviceMemoryAllocator(stream->parent()->GetPlatform()),
wrapped_(wrapped),
stream_(stream) {}
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped,
Platform *platform)
: DeviceMemoryAllocator(platform), wrapped_(wrapped), stream_(nullptr) {}
TfAllocatorAdapter::~TfAllocatorAdapter() {}
absl::StatusOr<OwningDeviceMemory> TfAllocatorAdapter::Allocate(
int device_ordinal, uint64_t size, bool retry_on_failure,
int64_t memory_space) {
tsl::AllocationAttributes attrs;
attrs.retry_on_failure = retry_on_failure;
void *data = nullptr;
if (size != 0) {
data =
wrapped_->AllocateRaw(tsl::Allocator::kAllocatorAlignment, size, attrs);
if (data == nullptr) {
return absl::ResourceExhaustedError(absl::StrCat(
"Out of memory while trying to allocate ", size, " bytes."));
}
}
return OwningDeviceMemory(DeviceMemoryBase(data, size), device_ordinal, this);
}
absl::Status TfAllocatorAdapter::Deallocate(int device_ordinal,
DeviceMemoryBase mem) {
wrapped_->DeallocateRaw(mem.opaque());
return absl::OkStatus();
}
absl::StatusOr<Stream *> TfAllocatorAdapter::GetStream(int device_ordinal) {
CHECK_EQ(stream_->parent()->device_ordinal(), device_ordinal);
return stream_;
}
absl::StatusOr<tsl::Allocator *> TfAllocatorAdapter::GetAllocator(
int device_ordinal) {
if (stream_ == nullptr) {
return absl::UnavailableError("stream_ is null for TfAllocatorAdapter.");
}
if (stream_->parent()->device_ordinal() != device_ordinal) {
return absl::InternalError(
absl::StrCat("stream_->parent()->device_ordinal() ",
stream_->parent()->device_ordinal(),
" not equal to device_ordinal ", device_ordinal));
}
return wrapped_;
}
} | #include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "absl/log/check.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
class TestAllocator : public tsl::Allocator {
public:
explicit TestAllocator(
size_t start_address,
std::shared_ptr<absl::flat_hash_set<void*>> allocations = nullptr)
: start_address_(start_address), allocations_(allocations) {
if (allocations_ == nullptr) {
allocations_ = std::make_shared<absl::flat_hash_set<void*>>();
}
}
std::string Name() override { return "test"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
void* ptr = reinterpret_cast<void*>(++start_address_);
allocations_->insert(ptr);
return ptr;
}
void DeallocateRaw(void* ptr) override {
auto it = allocations_->find(ptr);
if (it == allocations_->end()) {
ADD_FAILURE() << "Allocation not found (double free?)";
} else {
allocations_->erase(it);
}
}
private:
size_t start_address_;
std::shared_ptr<absl::flat_hash_set<void*>> allocations_;
};
TEST(MultiDeviceAdapter, UsesCorrectAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::vector<se::MultiDeviceAdapter::AllocatorInfo> infos;
infos.emplace_back(std::make_unique<TestAllocator>(0x1000), stream.get(),
0, 0);
infos.emplace_back(std::make_unique<TestAllocator>(0x2000), stream.get(),
0, 1);
infos.emplace_back(std::make_unique<TestAllocator>(0x3000), stream.get(),
1, 0);
infos.emplace_back(std::make_unique<TestAllocator>(0x4000), stream.get(),
1, 1);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform, std::move(infos));
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff0,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff1,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff1->opaque()), 0x1002);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff2,
allocator->Allocate(0, 4, false, 1));
CHECK_EQ(reinterpret_cast<size_t>(buff2->opaque()), 0x3001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff3,
allocator->Allocate(1, 4, false, 0));
CHECK_EQ(reinterpret_cast<size_t>(buff3->opaque()), 0x2001);
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff4,
allocator->Allocate(1, 4, false, 1));
CHECK_EQ(reinterpret_cast<size_t>(buff4->opaque()), 0x4001);
}
TEST(MultiDeviceAdapter, DeallocationWithDifferentAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::shared_ptr<absl::flat_hash_set<void*>> allocations =
std::make_shared<absl::flat_hash_set<void*>>();
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_allocator;
info_allocator.emplace_back(
std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
0, 0);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_allocator));
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_deallocator;
info_deallocator.emplace_back(
std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
0, 0);
std::unique_ptr<se::DeviceMemoryAllocator> deallocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_deallocator));
TF_ASSERT_OK_AND_ASSIGN(
se::OwningDeviceMemory buff0,
allocator->Allocate(0, 4, false, 0));
CHECK_EQ(allocations->size(), 1);
CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
TF_CHECK_OK(deallocator->Deallocate(0, buff0.cref()));
CHECK_EQ(allocations->size(), 0);
allocations->insert(buff0->opaque());
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e622bdc-263a-4543-afca-fcd327a73a35 | cpp | tensorflow/tensorflow | sycl_platform | third_party/xla/xla/stream_executor/sycl/sycl_platform.cc | third_party/xla/xla/stream_executor/sycl/sycl_platform_test.cc | #include "xla/stream_executor/sycl/sycl_platform.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform/initialize.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/sycl/sycl_platform_id.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace stream_executor {
namespace gpu {
SyclPlatform::SyclPlatform() : name_("SYCL") {}
SyclPlatform::~SyclPlatform() {}
Platform::Id SyclPlatform::id() const { return sycl::kSyclPlatformId; }
int SyclPlatform::VisibleDeviceCount() const {
static const int num_devices = [] {
if (!GpuDriver::Init().ok()) return -1;
return GpuDriver::GetDeviceCount();
}();
return num_devices;
}
const std::string& SyclPlatform::Name() const { return name_; }
absl::StatusOr<std::unique_ptr<DeviceDescription>>
SyclPlatform::DescriptionForDevice(int ordinal) const {
return GpuExecutor::CreateDeviceDescription(ordinal);
}
absl::StatusOr<StreamExecutor*> SyclPlatform::ExecutorForDevice(int ordinal) {
return executor_cache_.GetOrCreate(
ordinal, [this, ordinal]() { return GetUncachedExecutor(ordinal); });
}
absl::StatusOr<std::unique_ptr<StreamExecutor>>
SyclPlatform::GetUncachedExecutor(int ordinal) {
auto executor = std::make_unique<GpuExecutor>(this, ordinal);
TF_RETURN_IF_ERROR(executor->Init());
return std::move(executor);
}
}
static void InitializeSyclPlatform() {
TF_CHECK_OK(
PlatformManager::RegisterPlatform(std::make_unique<gpu::SyclPlatform>()));
}
}
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(
sycl_platform, stream_executor::InitializeSyclPlatform()); | #include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
static Platform* NewPlatform() {
Platform* platform = PlatformManager::PlatformWithName("SYCL").value();
return platform;
}
TEST(SyclPlatformTest, Name) {
auto platform = NewPlatform();
auto name = platform->Name();
EXPECT_EQ(name, "SYCL");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/sycl/sycl_platform.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/sycl/sycl_platform_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d372ca5e-0ee3-4c33-b46f-25c5dac29903 | cpp | tensorflow/tensorflow | rocm_kernel | third_party/xla/xla/stream_executor/rocm/rocm_kernel.cc | third_party/xla/xla/stream_executor/rocm/rocm_kernel_test.cc | #include "xla/stream_executor/rocm/rocm_kernel.h"
#include <cstddef>
#include <cstdint>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/launch_dim.h"
namespace stream_executor {
namespace gpu {
absl::StatusOr<int32_t> RocmKernel::GetMaxOccupiedBlocksPerCore(
ThreadDim threads, size_t dynamic_shared_memory_bytes) const {
int32_t threads_per_block = threads.x * threads.y * threads.z;
VLOG(0) << "Get kernel block occupancy: " << name()
<< "; threads_per_block: " << threads_per_block
<< "; dynamic_shared_memory_bytes: " << dynamic_shared_memory_bytes;
return GpuDriver::GetMaxOccupiedBlocksPerCore(
gpu_executor_->gpu_context(), rocm_function_, threads_per_block,
dynamic_shared_memory_bytes);
}
}
} | #include "xla/stream_executor/rocm/rocm_kernel.h"
#include <gtest/gtest.h>
#include "rocm/include/hip/hip_runtime.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_runtime.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Ge;
using tsl::testing::IsOkAndHolds;
TEST(RocmKernelTest, GetMaxOccupiedBlocksPerCore) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("ROCM"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
RocmKernel rocm_kernel(gpu_executor);
rocm_kernel.set_arity(3);
TF_ASSERT_OK_AND_ASSIGN(
hipFunction_t function,
RocmRuntime::GetFuncBySymbol(internal::GetAddI32Kernel()));
rocm_kernel.set_gpu_function(function);
EXPECT_EQ(rocm_kernel.Arity(), 3);
EXPECT_EQ(rocm_kernel.gpu_function(), function);
EXPECT_THAT(rocm_kernel.GetMaxOccupiedBlocksPerCore(
ThreadDim(1, 1, 1), 0),
IsOkAndHolds(Ge(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6479cac6-c669-44ce-a8f3-f49af593b676 | cpp | tensorflow/tensorflow | rocm_version_parser | third_party/xla/xla/stream_executor/rocm/rocm_version_parser.cc | third_party/xla/xla/stream_executor/rocm/rocm_version_parser_test.cc | #include "xla/stream_executor/rocm/rocm_version_parser.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
absl::StatusOr<SemanticVersion> ParseRocmVersion(int rocm_version) {
if (rocm_version < 0) {
return absl::InvalidArgumentError("Version numbers cannot be negative.");
}
int major = rocm_version / 10'000'000;
int minor = (rocm_version % 10'000'000) / 100'000;
int patch = rocm_version % 100'000;
return SemanticVersion(major, minor, patch);
}
} | #include "xla/stream_executor/rocm/rocm_version_parser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "rocm/include/hip/hip_version.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(ParseRocmVersionTest, Simple) {
EXPECT_THAT(stream_executor::ParseRocmVersion(60'100'002),
IsOkAndHolds(SemanticVersion(6, 1, 2)));
}
TEST(RocmVersionParserTest, NegativeIntegerIsNotAValidVersion) {
EXPECT_THAT(ParseRocmVersion(-42),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(RocmVersionParserTest, AlignsWithHIPVersion) {
EXPECT_THAT(ParseRocmVersion(HIP_VERSION),
IsOkAndHolds(SemanticVersion{HIP_VERSION_MAJOR, HIP_VERSION_MINOR,
HIP_VERSION_PATCH}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_version_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_version_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27547f78-99c7-49cb-aa5f-2a5f143d52a0 | cpp | tensorflow/tensorflow | rocm_executor | third_party/xla/xla/stream_executor/rocm/rocm_executor.cc | third_party/xla/xla/stream_executor/rocm/rocm_executor_test.cc | #include "xla/stream_executor/rocm/rocm_executor.h"
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include "absl/base/casts.h"
#include "absl/functional/any_invocable.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "rocm/include/hip/hip_runtime.h"
#include "rocm/include/hip/hip_version.h"
#include "rocm/rocm_config.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/event_based_timer.h"
#include "xla/stream_executor/fft.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/gpu_command_buffer.h"
#include "xla/stream_executor/gpu/gpu_diagnostics.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_event.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_kernel.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/read_numa_node.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform/initialize.h"
#include "xla/stream_executor/plugin_registry.h"
#include "xla/stream_executor/rocm/rocm_diagnostics.h"
#include "xla/stream_executor/rocm/rocm_driver.h"
#include "xla/stream_executor/rocm/rocm_driver_wrapper.h"
#include "xla/stream_executor/rocm/rocm_event.h"
#include "xla/stream_executor/rocm/rocm_kernel.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/rocm/rocm_runtime.h"
#include "xla/stream_executor/rocm/rocm_version_parser.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#define RETURN_IF_ROCM_ERROR(expr, ...) \
do { \
hipError_t _res = (expr); \
if (TF_PREDICT_FALSE(_res != hipSuccess)) { \
if (_res == hipErrorOutOfMemory) \
return absl::ResourceExhaustedError(absl::StrCat( \
__VA_ARGS__, ":", ::stream_executor::gpu::ToString(_res))); \
else \
return absl::InternalError(absl::StrCat( \
__VA_ARGS__, ": ", ::stream_executor::gpu::ToString(_res))); \
} \
} while (0)
namespace stream_executor {
namespace gpu {
static hipDeviceptr_t AsROCmDevicePtr(const DeviceMemoryBase& gpu_mem) {
return const_cast<hipDeviceptr_t>(gpu_mem.opaque());
}
static hipDeviceptr_t AsROCmDevicePtr(DeviceMemoryBase* gpu_mem) {
return AsROCmDevicePtr(*gpu_mem);
}
RocmExecutor::~RocmExecutor() {
for (auto& it : disk_modules_) {
GpuDriver::UnloadModule(gpu_context(), it.second);
}
for (auto& it : in_memory_modules_) {
GpuDriver::UnloadModule(gpu_context(), it.second);
}
if (gpu_context() != nullptr) {
GpuDriver::DestroyContext(gpu_context());
}
CHECK(kernel_to_gpu_binary_.empty()) << "GpuExecutor has live kernels.";
CHECK(gpu_binary_to_module_.empty()) << "GpuExecutor has loaded modules.";
}
bool RocmExecutor::UnloadModule(ModuleHandle module_handle) {
const char* gpu_binary = reinterpret_cast<const char*>(module_handle.id());
absl::MutexLock lock{&in_memory_modules_mu_};
return UnloadGpuBinary(gpu_binary);
}
namespace {
absl::uint128 Fingerprint128(const absl::string_view s) {
auto fp = tsl::Fingerprint128(s);
return absl::MakeUint128(fp.high64, fp.low64);
}
int fpus_per_core(std::string gcn_arch_name) {
int n = 128;
if (gcn_arch_name.substr(0, 6) == "gfx906") {
n = 64;
}
return n;
}
absl::Status FuncGetAttribute(hipFunction_attribute attribute,
hipFunction_t func, int* attribute_value) {
RETURN_IF_ROCM_ERROR(
wrap::hipFuncGetAttribute(attribute_value, attribute, func),
"Failed to query kernel attribute: ", attribute);
return absl::OkStatus();
}
}
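// Deduplicates device-resident constants by 128-bit fingerprint. The cache
// stores weak_ptrs, so a buffer is re-uploaded only after all prior users have
// released their shared_ptr.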
absl::StatusOr<std::shared_ptr<DeviceMemoryBase>>
RocmExecutor::CreateOrShareConstant(Stream* stream,
absl::Span<const uint8_t> content) {
absl::MutexLock lock{&shared_constants_mu_};
absl::uint128 fingerprint = Fingerprint128(absl::string_view(
reinterpret_cast<const char*>(content.data()), content.size()));
auto insert_result = shared_constants_.insert(
{fingerprint, std::weak_ptr<DeviceMemoryBase>()});
auto it = insert_result.first;
bool was_already_in_cache = !insert_result.second;
std::shared_ptr<DeviceMemoryBase> shared_constant;
if (was_already_in_cache) {
shared_constant = it->second.lock();
}
if (shared_constant == nullptr) {
DeviceMemoryBase* new_constant =
new DeviceMemoryBase(Allocate(content.size(), 0));
if (new_constant->opaque() == nullptr) {
return absl::InternalError(absl::StrFormat(
"Failed to allocate %d bytes for new constant", content.size()));
}
TF_RETURN_IF_ERROR(
stream->Memcpy(new_constant, content.data(), content.size()));
absl::Status status = stream->BlockHostUntilDone();
if (!status.ok()) {
Deallocate(new_constant);
status.Update(absl::InternalError(absl::StrFormat(
"Memcpy to device address %p failed", new_constant->opaque())));
return status;
}
shared_constant = std::shared_ptr<DeviceMemoryBase>(
new_constant, [this](DeviceMemoryBase* p) {
Deallocate(p);
delete p;
});
it->second = std::weak_ptr<DeviceMemoryBase>(shared_constant);
}
return shared_constant;
}
absl::StatusOr<std::unique_ptr<EventBasedTimer>>
RocmExecutor::CreateEventBasedTimer(GpuStream* stream, bool use_delay_kernel) {
TF_ASSIGN_OR_RETURN(auto start_event, CreateGpuEvent(true));
TF_ASSIGN_OR_RETURN(auto stop_event, CreateGpuEvent(true));
TF_RETURN_IF_ERROR(start_event->Record(stream->gpu_stream()));
return std::make_unique<GpuTimer>(gpu_context(), std::move(start_event),
std::move(stop_event), stream);
}
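// Drops one reference on the module backing the given HSACO image; once the
// refcount reaches zero the module is unloaded and its in-memory mapping is
// forgotten.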
bool RocmExecutor::UnloadGpuBinary(const void* gpu_binary) {
auto module_it = gpu_binary_to_module_.find(gpu_binary);
if (gpu_binary_to_module_.end() == module_it) {
VLOG(3) << "No loaded HSACO module for " << gpu_binary;
return false;
}
auto& module = module_it->second.first;
auto& refcount = module_it->second.second;
VLOG(3) << "Found HSACO module " << module << " with refcount " << refcount;
if (--refcount == 0) {
VLOG(3) << "Unloading HSACO module " << module;
GpuDriver::UnloadModule(gpu_context(), module);
gpu_binary_to_module_.erase(module_it);
const char* mem_it = nullptr;
for (auto x : in_memory_modules_) {
if (x.second == module) mem_it = x.first;
}
if (mem_it != nullptr) in_memory_modules_.erase(mem_it);
}
return true;
}
void RocmExecutor::UnloadKernel(const Kernel* kernel) {
VLOG(3) << "Unloading kernel " << kernel << " : " << kernel->name();
absl::MutexLock lock{&in_memory_modules_mu_};
auto gpu_binary_it = kernel_to_gpu_binary_.find(kernel);
if (kernel_to_gpu_binary_.end() == gpu_binary_it) {
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has never been loaded.";
return;
}
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has loaded GPU code " << gpu_binary_it->second;
UnloadGpuBinary(gpu_binary_it->second);
kernel_to_gpu_binary_.erase(gpu_binary_it);
}
absl::Status RocmExecutor::Init() {
TF_RETURN_IF_ERROR(GpuDriver::Init());
TF_RETURN_IF_ERROR(GpuDriver::GetDevice(device_ordinal(), &device_));
Context* context;
TF_RETURN_IF_ERROR(
GpuDriver::CreateContext(device_ordinal(), device_, &context));
set_context(context);
return GpuDriver::GetGpuISAVersion(&version_, device_);
}
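// Builds a RocmKernel either from an HSACO image embedded in the spec (loading
// and caching the corresponding hipModule_t) or from an in-process symbol
// resolved to a hipFunction_t.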
absl::StatusOr<std::unique_ptr<Kernel>> RocmExecutor::LoadKernel(
const MultiKernelLoaderSpec& spec) {
auto rocm_kernel = std::make_unique<RocmKernel>(this);
hipModule_t module = nullptr;
const std::string* kernel_name;
if (spec.has_cuda_cubin_in_memory()) {
kernel_name = &spec.cuda_cubin_in_memory().kernel_name();
const char* hsaco = reinterpret_cast<const char*>(
spec.cuda_cubin_in_memory().cubin_bytes().data());
absl::MutexLock lock{&in_memory_modules_mu_};
module = in_memory_modules_[hsaco];
if (module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadHsaco(gpu_context(), hsaco, &module));
}
kernel_to_gpu_binary_[rocm_kernel.get()] = hsaco;
} else if (spec.has_in_process_symbol()) {
kernel_name = &spec.in_process_symbol().kernel_name();
void* symbol = spec.in_process_symbol().symbol();
VLOG(1) << "Resolve ROCM kernel " << *kernel_name
<< " from symbol pointer: " << symbol;
#if TF_ROCM_VERSION >= 60200
TF_ASSIGN_OR_RETURN(
GpuFunctionHandle function,
RocmRuntime::GetFuncBySymbol(spec.in_process_symbol().symbol()));
rocm_kernel->set_gpu_function(function);
#else
rocm_kernel->set_gpu_function(
static_cast<hipFunction_t>(spec.in_process_symbol().symbol()));
#endif
} else {
return absl::InternalError("No method of loading ROCM kernel provided");
}
if (!spec.has_in_process_symbol()) {
VLOG(2) << "getting function " << *kernel_name << " from module " << module;
GpuFunctionHandle function;
TF_RETURN_IF_ERROR(GpuDriver::GetModuleFunction(
gpu_context(), module, kernel_name->c_str(), &function));
rocm_kernel->set_gpu_function(function);
}
rocm_kernel->set_arity(spec.arity());
if (!spec.has_in_process_symbol()) {
KernelMetadata kernel_metadata;
TF_RETURN_IF_ERROR(GetKernelMetadata(rocm_kernel.get(), &kernel_metadata));
rocm_kernel->set_metadata(kernel_metadata);
}
rocm_kernel->set_name(*kernel_name);
rocm_kernel->set_args_packing(spec.kernel_args_packing());
return std::move(rocm_kernel);
}
absl::Status RocmExecutor::GetKernelMetadata(GpuKernel* rocm_kernel,
KernelMetadata* kernel_metadata) {
int value = 0;
TF_RETURN_IF_ERROR(FuncGetAttribute(HIP_FUNC_ATTRIBUTE_NUM_REGS,
rocm_kernel->gpu_function(), &value));
kernel_metadata->set_registers_per_thread(value);
TF_RETURN_IF_ERROR(FuncGetAttribute(HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES,
rocm_kernel->gpu_function(), &value));
kernel_metadata->set_shared_memory_bytes(value);
return absl::OkStatus();
}
absl::Status RocmExecutor::LoadModule(const MultiModuleLoaderSpec& spec,
ModuleHandle* module_handle) {
hipModule_t hip_module = nullptr;
if (spec.has_cuda_cubin_in_memory()) {
absl::MutexLock lock{&in_memory_modules_mu_};
TF_RETURN_IF_ERROR(LoadModuleFromHsaco(
reinterpret_cast<const char*>(spec.cuda_cubin_in_memory().data()),
&hip_module));
*module_handle = ModuleHandle(const_cast<void*>(
static_cast<const void*>(spec.cuda_cubin_in_memory().data())));
return absl::OkStatus();
} else {
return absl::InternalError("No HASCO binary found");
}
}
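// Loads the HSACO image as a hipModule_t, reusing an already-loaded module and
// bumping its refcount when possible. Caller holds in_memory_modules_mu_.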
absl::Status RocmExecutor::LoadModuleFromHsaco(const char* hsaco,
hipModule_t* module) {
uint64_t module_refcount;
std::tie(*module, module_refcount) = gpu_binary_to_module_[hsaco];
if (*module == nullptr) {
TF_RETURN_IF_ERROR(GpuDriver::LoadHsaco(gpu_context(), hsaco, module));
module_refcount = 1;
in_memory_modules_[hsaco] = *module;
VLOG(3) << "Loaded HSACO " << static_cast<const void*>(hsaco)
<< " as module " << *module;
} else {
++module_refcount;
VLOG(3) << "HSACO " << static_cast<const void*>(hsaco)
<< " is already loaded as module " << *module;
}
gpu_binary_to_module_[hsaco] = {*module, module_refcount};
return absl::OkStatus();
}
DeviceMemoryBase RocmExecutor::Allocate(uint64_t size, int64_t memory_space) {
if (memory_space ==
static_cast<int64_t>(stream_executor::MemoryType::kHost)) {
return DeviceMemoryBase(GpuDriver::HostAllocate(gpu_context(), size), size);
}
CHECK_EQ(memory_space, 0);
return DeviceMemoryBase(GpuDriver::DeviceAllocate(gpu_context(), size), size);
}
void RocmExecutor::Deallocate(DeviceMemoryBase* mem) {
GpuDriver::DeviceDeallocate(gpu_context(), mem->opaque());
}
bool RocmExecutor::SynchronizeAllActivity() {
return GpuDriver::SynchronizeContext(gpu_context()).ok();
}
absl::Status RocmExecutor::SynchronousMemZero(DeviceMemoryBase* location,
uint64_t size) {
if (reinterpret_cast<uintptr_t>(location->opaque()) % 4 == 0 &&
size % 4 == 0) {
return GpuDriver::SynchronousMemsetUint32(
gpu_context(), AsROCmDevicePtr(location), 0x0, size / 4);
}
return GpuDriver::SynchronousMemsetUint8(
gpu_context(), AsROCmDevicePtr(location), 0x0, size);
}
absl::Status RocmExecutor::SynchronousMemcpy(DeviceMemoryBase* gpu_dst,
const void* host_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyH2D(
gpu_context(), AsROCmDevicePtr(gpu_dst), host_src, size);
}
absl::Status RocmExecutor::SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& gpu_src,
uint64_t size) {
return GpuDriver::SynchronousMemcpyD2H(gpu_context(), host_dst,
AsROCmDevicePtr(gpu_src), size);
}
void RocmExecutor::DeallocateStream(Stream* stream) {
{
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
dnn_->NotifyStreamDestroyed(stream);
}
}
GpuStream* rocm_stream = AsGpuStream(stream);
absl::MutexLock l(&alive_gpu_streams_mu_);
alive_gpu_streams_.erase(rocm_stream->gpu_stream());
}
absl::Status RocmExecutor::BlockHostUntilDone(Stream* stream) {
return GpuDriver::SynchronizeStream(gpu_context(), AsGpuStreamValue(stream));
}
blas::BlasSupport* RocmExecutor::AsBlas() {
absl::MutexLock lock(&mu_);
if (blas_ != nullptr) {
return blas_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::BlasFactory> status =
registry->GetFactory<PluginRegistry::BlasFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve BLAS factory: "
<< status.status().message();
return nullptr;
}
auto blas = status.value()(this);
blas_.reset(blas);
return blas_.get();
}
dnn::DnnSupport* RocmExecutor::AsDnn() {
absl::MutexLock lock(&mu_);
if (dnn_ != nullptr) {
return dnn_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::DnnFactory> status =
registry->GetFactory<PluginRegistry::DnnFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve DNN factory: "
<< status.status().message();
return nullptr;
}
auto dnn = status.value()(this);
dnn_.reset(dnn);
return dnn_.get();
}
fft::FftSupport* RocmExecutor::AsFft() {
absl::MutexLock lock(&mu_);
if (fft_ != nullptr) {
return fft_.get();
}
PluginRegistry* registry = PluginRegistry::Instance();
absl::StatusOr<PluginRegistry::FftFactory> status =
registry->GetFactory<PluginRegistry::FftFactory>(rocm::kROCmPlatformId);
if (!status.ok()) {
LOG(ERROR) << "Unable to retrieve FFT factory: "
<< status.status().message();
return nullptr;
}
auto fft = status.value()(this);
fft_.reset(fft);
return fft_.get();
}
bool RocmExecutor::CanEnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* rocm_other = static_cast<GpuExecutor*>(other);
return GpuDriver::CanEnablePeerAccess(gpu_context(),
rocm_other->gpu_context());
}
absl::Status RocmExecutor::EnablePeerAccessTo(StreamExecutor* other) {
GpuExecutor* rocm_other = static_cast<GpuExecutor*>(other);
return GpuDriver::EnablePeerAccess(gpu_context(), rocm_other->gpu_context());
}
bool RocmExecutor::DeviceMemoryUsage(int64_t* free, int64_t* total) const {
return GpuDriver::GetDeviceMemoryInfo(gpu_context(), free, total);
}
absl::StatusOr<DeviceMemoryBase> RocmExecutor::GetSymbol(
const std::string& symbol_name, ModuleHandle module_handle) {
void* mem = nullptr;
size_t bytes = 0;
absl::MutexLock lock{&in_memory_modules_mu_};
if (static_cast<bool>(module_handle)) {
auto it = gpu_binary_to_module_.find(module_handle.id());
CHECK(it != gpu_binary_to_module_.end());
TF_RETURN_IF_ERROR(GpuDriver::GetModuleSymbol(
gpu_context(), it->second.first, symbol_name.c_str(),
reinterpret_cast<hipDeviceptr_t*>(&mem), &bytes));
return DeviceMemoryBase(mem, bytes);
}
for (auto& it : gpu_binary_to_module_) {
TF_RETURN_IF_ERROR(GpuDriver::GetModuleSymbol(
gpu_context(), it.second.first, symbol_name.c_str(),
reinterpret_cast<hipDeviceptr_t*>(&mem), &bytes));
return DeviceMemoryBase(mem, bytes);
}
LOG(INFO) << "Falied to find symbol in any modules: " << symbol_name;
return absl::NotFoundError(
absl::StrCat("Check if module containing symbol ", symbol_name,
" is loaded (module_handle = ",
reinterpret_cast<uintptr_t>(module_handle.id()), ")"));
}
absl::Status FillBlockDimLimit(GpuDeviceHandle device,
BlockDim* block_dim_limit) {
int x, y, z;
TF_RETURN_IF_ERROR(GpuDriver::GetGridLimits(&x, &y, &z, device));
block_dim_limit->x = x;
block_dim_limit->y = y;
block_dim_limit->z = z;
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<GpuEvent>> RocmExecutor::CreateGpuEvent(
bool allow_timing) {
auto gpu_event = std::make_unique<RocmEvent>(gpu_context());
TF_RETURN_IF_ERROR(gpu_event->Init(allow_timing));
return std::move(gpu_event);
}
absl::StatusOr<std::unique_ptr<Event>> RocmExecutor::CreateEvent() {
return CreateGpuEvent(false);
}
absl::StatusOr<std::unique_ptr<Stream>> RocmExecutor::CreateStream(
std::optional<std::variant<StreamPriority, int>> priority) {
TF_ASSIGN_OR_RETURN(auto event, CreateGpuEvent(false));
auto stream = std::make_unique<GpuStream>(this, std::move(event), priority);
absl::MutexLock l(&alive_gpu_streams_mu_);
TF_RETURN_IF_ERROR(stream->Init());
auto gpu_stream = stream->gpu_stream();
alive_gpu_streams_[gpu_stream] = stream.get();
return std::move(stream);
}
absl::StatusOr<std::unique_ptr<CommandBuffer>>
RocmExecutor::CreateCommandBuffer(CommandBuffer::Mode mode) {
VLOG(2) << "Create ROCm command buffer (ROCm graph)";
GpuGraphHandle graph = nullptr;
TF_RETURN_IF_ERROR(GpuDriver::CreateGraph(&graph));
return std::make_unique<GpuCommandBuffer>(mode, this, graph);
}
absl::Status RocmExecutor::TrimGraphMemory() {
return GpuDriver::DeviceGraphMemTrim(device_);
}
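// Queries the HIP driver for device properties and assembles a
// DeviceDescription (clock rate, memory bandwidth, core/thread limits, ISA).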
absl::StatusOr<std::unique_ptr<DeviceDescription>>
RocmExecutor::CreateDeviceDescription(int device_ordinal) {
GpuDeviceHandle device;
auto status = GpuDriver::GetDevice(device_ordinal, &device);
if (!status.ok()) {
return status;
}
int version;
status = GpuDriver::GetGpuISAVersion(&version, device);
if (!status.ok()) {
return status;
}
std::string gcn_arch_name;
status = GpuDriver::GetGpuGCNArchName(device, &gcn_arch_name);
if (!status.ok()) {
return status;
}
DeviceDescription desc;
{
std::string pci_bus_id = GpuDriver::GetPCIBusID(device);
pci_bus_id = absl::AsciiStrToLower(pci_bus_id);
desc.set_pci_bus_id(pci_bus_id);
int numa_node = ReadNumaNode(pci_bus_id, device_ordinal);
desc.set_numa_node(numa_node);
}
hipDeviceProp_t prop;
if (GpuDriver::GetDeviceProperties(&prop, device_ordinal)) {
desc.set_threads_per_block_limit(prop.maxThreadsPerBlock);
ThreadDim thread_dim_limit;
thread_dim_limit.x = prop.maxThreadsDim[0];
thread_dim_limit.y = prop.maxThreadsDim[1];
thread_dim_limit.z = prop.maxThreadsDim[2];
desc.set_thread_dim_limit(thread_dim_limit);
float clock_rate_ghz = static_cast<float>(prop.clockRate) / 1e6;
desc.set_clock_rate_ghz(clock_rate_ghz);
int64_t memory_bandwidth =
2 * (static_cast<int64_t>(prop.memoryBusWidth) / 8) *
(static_cast<int64_t>(prop.memoryClockRate) * 1000);
desc.set_memory_bandwidth(memory_bandwidth);
desc.set_l2_cache_size(prop.l2CacheSize);
}
{
bool ecc_enabled = false;
(void)GpuDriver::IsEccEnabled(device, &ecc_enabled);
desc.set_ecc_enabled(ecc_enabled);
}
uint64_t device_memory_size = -1;
(void)GpuDriver::GetDeviceTotalMemory(device, &device_memory_size);
desc.set_device_memory_size(device_memory_size);
{
BlockDim block_dim_limit;
TF_RETURN_IF_ERROR(FillBlockDimLimit(device, &block_dim_limit));
desc.set_block_dim_limit(block_dim_limit);
}
{
std::string device_name;
TF_RETURN_IF_ERROR(GpuDriver::GetDeviceName(device, &device_name));
desc.set_name(device_name);
}
desc.set_platform_version(
absl::StrCat("AMDGPU ISA version: ", gcn_arch_name));
desc.set_device_address_bits(64);
desc.set_device_vendor("Advanced Micro Devices, Inc");
desc.set_rocm_compute_capability(gcn_arch_name);
desc.set_shared_memory_per_core(
GpuDriver::GetMaxSharedMemoryPerCore(device).value());
desc.set_shared_memory_per_block(
GpuDriver::GetMaxSharedMemoryPerBlock(device).value());
int core_count = GpuDriver::GetMultiprocessorCount(device).value();
desc.set_core_count(core_count);
desc.set_fpus_per_core(fpus_per_core(gcn_arch_name));
desc.set_threads_per_core_limit(
GpuDriver::GetMaxThreadsPerMultiprocessor(device).value());
desc.set_registers_per_block_limit(
GpuDriver::GetMaxRegistersPerBlock(device).value());
desc.set_threads_per_warp(GpuDriver::GetThreadsPerWarp(device).value());
desc.set_registers_per_core_limit(64 * 1024);
desc.set_compile_time_toolkit_version(
SemanticVersion{HIP_VERSION_MAJOR, HIP_VERSION_MINOR, HIP_VERSION_PATCH});
desc.set_runtime_version(
ParseRocmVersion(RocmRuntime::GetRuntimeVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
desc.set_driver_version(
ParseRocmVersion(GpuDriver::GetDriverVersion().value_or(0))
.value_or(SemanticVersion{0, 0, 0}));
int cc_major = 0;
int cc_minor = 0;
GpuDriver::GetComputeCapability(&cc_major, &cc_minor, device).IgnoreError();
desc.set_model_str(absl::StrFormat("cc_%d.%d with %dB RAM, %d cores",
cc_major, cc_minor, device_memory_size,
core_count));
return std::make_unique<DeviceDescription>(std::move(desc));
}
}
}
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(rocm_executor, {}); | #include "xla/stream_executor/rocm/rocm_executor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Field;
using testing::Ge;
using testing::IsEmpty;
using testing::Not;
using testing::VariantWith;
TEST(RocmExecutorTest, CreateDeviceDescription) {
TF_ASSERT_OK(GpuDriver::Init());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DeviceDescription> result,
                          RocmExecutor::CreateDeviceDescription(0));
constexpr SemanticVersion kNullVersion{0, 0, 0};
EXPECT_NE(result->runtime_version(), kNullVersion);
EXPECT_NE(result->driver_version(), kNullVersion);
EXPECT_NE(result->compile_time_toolkit_version(), kNullVersion);
EXPECT_THAT(result->platform_version(), Not(IsEmpty()));
EXPECT_THAT(result->name(), Not(IsEmpty()));
EXPECT_THAT(result->model_str(), Not(IsEmpty()));
EXPECT_THAT(result->device_vendor(), "Advanced Micro Devices, Inc");
EXPECT_THAT(result->gpu_compute_capability(),
VariantWith<RocmComputeCapability>(
Field("gcn_arch_name", &RocmComputeCapability::gcn_arch_name,
Not(IsEmpty()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0337c7e9-779f-4dc9-ab9f-7dca89e9e980 | cpp | tensorflow/tensorflow | c_api_conversions | third_party/xla/xla/stream_executor/tpu/c_api_conversions.cc | third_party/xla/xla/stream_executor/tpu/c_api_conversions_test.cc | #include "xla/stream_executor/tpu/c_api_conversions.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/service/computation_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#include "xla/stream_executor/tpu/c_api_defn.h"
#include "xla/stream_executor/tpu/proto_helper.h"
#include "xla/stream_executor/tpu/tpu_api.h"
#include "xla/stream_executor/tpu/tpu_executor_api.h"
#include "xla/stream_executor/tpu/tpu_executor_c_api.h"
#include "xla/stream_executor/tpu/tpu_ops_c_api.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace ApiConverter {
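// Copies |src| into the C list types below; small lists use inline storage and
// anything larger than TPU_C_API_MAX_INLINED spills to a heap allocation.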
template <typename Src, typename Dst, typename DstList>
static void CreateVectorBase(const absl::Span<Src> src, DstList* dst) {
dst->size = src.size();
if (dst->size > TPU_C_API_MAX_INLINED) {
dst->heap = new Dst[dst->size];
std::copy(src.begin(), src.end(), dst->heap);
} else {
std::copy(src.begin(), src.end(), dst->inlined);
}
}
void CreateVector(const absl::Span<const int> src, IntList* dst) {
return CreateVectorBase<const int, int, IntList>(src, dst);
}
void CreateVector(const absl::Span<const int64_t> src, Int64List* dst) {
return CreateVectorBase<const int64_t, int64_t, Int64List>(src, dst);
}
void CreateVector(const absl::Span<const float> src, FloatList* dst) {
return CreateVectorBase<const float, float, FloatList>(src, dst);
}
void CreateVector(const absl::Span<const bool> src, BoolList* dst) {
return CreateVectorBase<const bool, bool, BoolList>(src, dst);
}
void CreateVector(const absl::Span<const xla::DimLevelType> src, IntList* dst) {
CreateVectorBase<const xla::DimLevelType, int, IntList>(src, dst);
}
static void CreateVector(const absl::Span<const bool> src, IntList* dst) {
CreateVectorBase<const bool, int, IntList>(src, dst);
}
static void CreateVector(const absl::Span<const xla::Tile> src, TileList* dst) {
dst->size = src.size();
XLA_Tile* c_tiles;
if (dst->size > TPU_C_API_MAX_INLINED) {
dst->heap = new XLA_Tile[dst->size];
c_tiles = dst->heap;
} else {
c_tiles = dst->inlined;
}
for (int i = 0; i < dst->size; ++i) {
ToC(src[i], &c_tiles[i]);
}
}
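// Returns a span over a C list's elements, reading from the heap buffer when
// the list is larger than the inline capacity.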
template <typename Dst, typename Src, typename SrcList>
static absl::Span<const Dst> MakeSpanBase(const SrcList& src_list) {
static_assert(sizeof(Src) == sizeof(Dst), "Mismatched types");
const Src* src = src_list.size > TPU_C_API_MAX_INLINED ? src_list.heap
: &src_list.inlined[0];
return absl::Span<const Dst>(reinterpret_cast<const Dst*>(src),
src_list.size);
}
absl::Span<const int> MakeSpan(const IntList& src_list) {
return MakeSpanBase<int, int, IntList>(src_list);
}
absl::Span<const int64_t> MakeSpan(const Int64List& src_list) {
return MakeSpanBase<int64_t, int64_t, Int64List>(src_list);
}
absl::Span<const float> MakeSpan(const FloatList& src_list) {
return MakeSpanBase<float, float, FloatList>(src_list);
}
absl::Span<const bool> MakeSpan(const BoolList& src_list) {
return MakeSpanBase<bool, bool, BoolList>(src_list);
}
xla::ShapedBuffer FromC(XLA_ShapedBuffer* c_buffer) {
xla::Shape xla_on_device_shape =
ApiConverter::FromC(&c_buffer->on_device_shape);
xla::ShapeTree<stream_executor::DeviceMemoryBase> xla_shape_tree(
xla_on_device_shape);
size_t i = 0;
for (auto& pair : xla_shape_tree) {
pair.second = ApiConverter::FromC(c_buffer->bases[i]);
i++;
}
xla::ShapedBuffer xla_shaped_buffer(xla_on_device_shape,
c_buffer->device_ordinal);
xla_shaped_buffer.set_buffers(xla_shape_tree);
return xla_shaped_buffer;
}
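// Converts owning/non-owning device memory to the C struct. For owned memory
// that is not aliased, ownership is released on the C++ side and passes to the
// C API consumer.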
SE_MaybeOwningDeviceMemory ToC(xla::MaybeOwningDeviceMemory& mem,
bool aliased) {
SE_MaybeOwningDeviceMemory se_mem;
se_mem.owned = mem.HasOwnership();
se_mem.memory = ApiConverter::ToC(mem.AsDeviceMemoryBase());
if (mem.HasOwnership()) {
const stream_executor::OwningDeviceMemory* owned =
mem.AsOwningDeviceMemory();
se_mem.device_ordinal = owned->device_ordinal();
se_mem.allocator = ApiConverter::ToC(owned->allocator());
if (!aliased) {
mem.Release()->Release();
}
} else {
se_mem.allocator =
ToC(static_cast<stream_executor::DeviceMemoryAllocator*>(nullptr));
se_mem.device_ordinal = -1;
}
return se_mem;
}
xla::MaybeOwningDeviceMemory FromC(
SE_MaybeOwningDeviceMemory* se_mem,
stream_executor::DeviceMemoryAllocator* allocator) {
if (se_mem->owned) {
return xla::MaybeOwningDeviceMemory(
stream_executor::OwningDeviceMemory(ApiConverter::FromC(se_mem->memory),
se_mem->device_ordinal, allocator));
} else {
return xla::MaybeOwningDeviceMemory(ApiConverter::FromC(se_mem->memory));
}
}
SE_DeviceMemoryAllocator ToC(
stream_executor::DeviceMemoryAllocator* allocator) {
SE_DeviceMemoryAllocator se_allocator;
if (allocator == nullptr) {
se_allocator.ctx = nullptr;
se_allocator.platform = nullptr;
se_allocator.allocate = nullptr;
se_allocator.deallocate = nullptr;
return se_allocator;
}
se_allocator.platform = nullptr;
se_allocator.ctx = allocator;
se_allocator.allocate = [](void* ctx, int device_ordinal, uint64_t size,
bool retry_on_failure, int64_t memory_space,
SE_ScopedDeviceMemory* memory,
TF_Status* se_status) {
auto allocation =
reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(ctx)
->Allocate(device_ordinal, size, retry_on_failure, memory_space);
if (!allocation.ok()) {
auto status = allocation.status();
auto message = status.message();
stream_executor::tpu::ExecutorApiFn()->TpuStatus_SetFn(
se_status, status.raw_code(), message.data(), message.size());
} else {
auto& scoped_memory = allocation.value();
memory->wrapped = ApiConverter::ToC(scoped_memory.Release());
memory->device_ordinal = scoped_memory.device_ordinal();
}
};
se_allocator.deallocate = [](void* ctx, SE_DeviceMemoryBase* base,
int device_ordinal, TF_Status* se_status) {
auto status = reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(ctx)
->Deallocate(device_ordinal, ApiConverter::FromC(*base));
if (!status.ok()) {
auto message = status.message();
stream_executor::tpu::ExecutorApiFn()->TpuStatus_SetFn(
se_status, status.raw_code(), message.data(), message.size());
}
};
return se_allocator;
}
stream_executor::DeviceMemoryAllocator* FromC(
const SE_DeviceMemoryAllocator& c_allocator) {
return reinterpret_cast<stream_executor::DeviceMemoryAllocator*>(
c_allocator.ctx);
}
SE_MaybeOwningDeviceMemory ToC(stream_executor::OwningDeviceMemory* mem) {
SE_MaybeOwningDeviceMemory se_mem;
se_mem.device_ordinal = mem->device_ordinal();
se_mem.memory = ApiConverter::ToC(mem->Release());
se_mem.allocator = ApiConverter::ToC(mem->allocator());
se_mem.owned = true;
return se_mem;
}
void ToC(const stream_executor::DeviceMemoryBase& base,
SE_DeviceMemoryBase* se_base) {
se_base->opaque = const_cast<void*>(base.opaque());
se_base->payload = base.payload();
se_base->size = base.size();
}
SE_DeviceMemoryBase ToC(const stream_executor::DeviceMemoryBase& base) {
SE_DeviceMemoryBase se_base;
ToC(base, &se_base);
return se_base;
}
stream_executor::DeviceMemoryBase FromC(const SE_DeviceMemoryBase& se_base) {
stream_executor::DeviceMemoryBase base(se_base.opaque, se_base.size);
base.SetPayload(se_base.payload);
return base;
}
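// Recursively converts an xla::Shape (dimensions, dynamic dimensions, tuple
// element shapes, and layout) into its C representation.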
void ToC(const xla::Shape& xla_shape, XLA_Shape* c_shape) {
c_shape->element_type = xla_shape.element_type();
CreateVector(xla_shape.dimensions(), &c_shape->dimensions);
CreateVector(xla_shape.dynamic_dimensions(), &c_shape->dynamic_dimensions);
c_shape->ntuple_shapes = xla_shape.tuple_shapes_size();
if (c_shape->ntuple_shapes > 0) {
c_shape->tuple_shapes = new XLA_Shape[c_shape->ntuple_shapes];
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
ToC(xla_shape.tuple_shapes(i), &c_shape->tuple_shapes[i]);
}
}
if (xla_shape.has_layout()) {
c_shape->has_layout = true;
ToC(xla_shape.layout(), &c_shape->layout);
} else {
c_shape->has_layout = false;
}
}
xla::Shape FromC(const XLA_Shape* c_shape) {
absl::Span<const int64_t> dims = MakeSpan(c_shape->dimensions);
absl::Span<const bool> dynamic_dims = MakeSpan(c_shape->dynamic_dimensions);
std::vector<xla::Shape> tuple_shapes;
tuple_shapes.reserve(c_shape->ntuple_shapes);
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
tuple_shapes.push_back(FromC(&c_shape->tuple_shapes[i]));
}
xla::Shape result(static_cast<xla::PrimitiveType>(c_shape->element_type),
dims, dynamic_dims, std::move(tuple_shapes));
if (c_shape->has_layout) {
*result.mutable_layout() = FromC(&c_shape->layout);
}
return result;
}
void Destroy(XLA_Shape* c_shape) {
if (c_shape->dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_shape->dimensions.heap;
}
if (c_shape->dynamic_dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_shape->dynamic_dimensions.heap;
}
if (c_shape->ntuple_shapes > 0) {
for (int i = 0; i < c_shape->ntuple_shapes; ++i) {
Destroy(&c_shape->tuple_shapes[i]);
}
delete[] c_shape->tuple_shapes;
}
if (c_shape->has_layout) {
Destroy(&c_shape->layout);
}
}
void ToC(const xla::Layout& layout, XLA_Layout* c_layout) {
CreateVector(layout.minor_to_major(), &c_layout->minor_to_major);
{
const int n = layout.dim_level_types_size();
absl::InlinedVector<xla::DimLevelType, xla::InlineRank()> dim_level_types(
n);
for (int i = 0; i < n; i++) {
dim_level_types[i] = layout.dim_level_type(i);
}
CreateVector(dim_level_types, &c_layout->dim_level_types);
}
{
const int n = layout.dim_unique_size();
absl::InlinedVector<bool, xla::InlineRank()> dim_unique(n);
for (int i = 0; i < n; i++) {
dim_unique[i] = layout.dim_unique(i);
}
CreateVector(dim_unique, &c_layout->dim_unique);
}
{
const int n = layout.dim_ordered_size();
absl::InlinedVector<bool, xla::InlineRank()> dim_ordered(n);
for (int i = 0; i < n; i++) {
dim_ordered[i] = layout.dim_ordered(i);
}
CreateVector(dim_ordered, &c_layout->dim_ordered);
}
c_layout->index_primitive_type = layout.index_primitive_type();
c_layout->pointer_primitive_type = layout.pointer_primitive_type();
c_layout->element_size_in_bits = layout.element_size_in_bits();
c_layout->memory_space = layout.memory_space();
c_layout->dynamic_shape_metadata_prefix_bytes =
layout.dynamic_shape_metadata_prefix_bytes();
CreateVector(layout.tiles(), &c_layout->tiles);
c_layout->tail_padding_alignment_in_elements =
layout.tail_padding_alignment_in_elements();
}
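// Rebuilds an xla::Layout from the C struct: minor-to-major order, dim level
// types, uniqueness/ordering bits, tiles, and primitive-type metadata.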
xla::Layout FromC(const XLA_Layout* c_layout) {
absl::Span<const int64_t> minor_to_major = MakeSpan(c_layout->minor_to_major);
absl::Span<const int> dim_level_type_ints =
MakeSpan(c_layout->dim_level_types);
xla::DimLevelTypeVector dim_level_types;
dim_level_types.reserve(dim_level_type_ints.size());
for (int dim_level_type : dim_level_type_ints) {
dim_level_types.push_back(static_cast<xla::DimLevelType>(dim_level_type));
}
absl::Span<const int> dim_unique_ints = MakeSpan(c_layout->dim_unique);
absl::InlinedVector<bool, xla::InlineRank()> dim_unique(
dim_unique_ints.begin(), dim_unique_ints.end());
absl::Span<const int> dim_ordered_ints = MakeSpan(c_layout->dim_unique);
absl::InlinedVector<bool, xla::InlineRank()> dim_ordered(
dim_ordered_ints.begin(), dim_ordered_ints.end());
absl::InlinedVector<xla::Tile, 1> tiles;
const XLA_Tile* c_tiles = c_layout->tiles.size > TPU_C_API_MAX_INLINED
? c_layout->tiles.heap
: c_layout->tiles.inlined;
tiles.reserve(c_layout->tiles.size);
for (int i = 0; i < c_layout->tiles.size; ++i) {
tiles.push_back(FromC(&c_tiles[i]));
}
return xla::Layout(
minor_to_major, dim_level_types, dim_unique, dim_ordered, tiles,
c_layout->tail_padding_alignment_in_elements,
static_cast<xla::PrimitiveType>(c_layout->index_primitive_type),
static_cast<xla::PrimitiveType>(c_layout->pointer_primitive_type),
c_layout->element_size_in_bits, c_layout->memory_space,
{},
nullptr,
c_layout->dynamic_shape_metadata_prefix_bytes);
}
void Destroy(XLA_Layout* c_layout) {
  if (c_layout->minor_to_major.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->minor_to_major.heap;
  }
  if (c_layout->dim_level_types.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->dim_level_types.heap;
  }
  // Also release the dim_unique/dim_ordered heap buffers allocated in ToC.
  if (c_layout->dim_unique.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->dim_unique.heap;
  }
  if (c_layout->dim_ordered.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->dim_ordered.heap;
  }
  if (c_layout->tiles.size > TPU_C_API_MAX_INLINED) {
    delete[] c_layout->tiles.heap;
  }
}
void ToC(const xla::Tile& tile, XLA_Tile* c_tile) {
CreateVector(tile.dimensions(), &c_tile->dimensions);
}
xla::Tile FromC(const XLA_Tile* c_tile) {
absl::Span<const int64_t> dims = MakeSpan(c_tile->dimensions);
return xla::Tile(dims);
}
void Destroy(XLA_Tile* c_tile) {
if (c_tile->dimensions.size > TPU_C_API_MAX_INLINED) {
delete[] c_tile->dimensions.heap;
}
}
XLA_ShapeIndex ToC(const xla::ShapeIndex& xla_shape) {
XLA_ShapeIndex c_shape;
CHECK_LT(xla_shape.size(), 8);
c_shape.count = xla_shape.size();
for (int i = 0; i < xla_shape.size(); ++i) {
c_shape.indices[i] = xla_shape[i];
}
return c_shape;
}
xla::ShapeIndex FromC(XLA_ShapeIndex* c_shape) {
return xla::ShapeIndex(c_shape->indices, c_shape->indices + c_shape->count);
}
void ToC(const xla::LiteralSlice& literal, XLA_Literal* c_literal) {
ApiConverter::ToC(literal.shape(), &c_literal->shape);
auto shapes = xla::ShapeUtil::GetLeafShapes(literal.shape());
c_literal->buffers = new char*[shapes.size()];
c_literal->sizes = new size_t[shapes.size()];
c_literal->count = shapes.size();
for (int i = 0; i < shapes.size(); ++i) {
c_literal->buffers[i] = reinterpret_cast<char*>(
const_cast<void*>(literal.untyped_data(shapes[i].index)));
c_literal->sizes[i] = literal.size_bytes(shapes[i].index);
}
}
xla::MutableBorrowingLiteral FromC(XLA_Literal* c_literal) {
xla::Shape shape = ApiConverter::FromC(&c_literal->shape);
return xla::MutableBorrowingLiteral(
absl::MakeSpan(c_literal->buffers, c_literal->count), shape);
}
void ToC(const xla::ShapedBuffer& buffer, XLA_ShapedBuffer* c_device_buffer) {
ApiConverter::ToC(buffer.on_device_shape(),
&c_device_buffer->on_device_shape);
c_device_buffer->device_ordinal = buffer.device_ordinal();
absl::InlinedVector<SE_DeviceMemoryBase, 2> bases;
for (auto& pair : buffer.buffers()) {
bases.push_back(ApiConverter::ToC(pair.second));
}
c_device_buffer->count = bases.size();
c_device_buffer->bases = new SE_DeviceMemoryBase[bases.size()];
for (int i = 0; i < bases.size(); ++i) {
c_device_buffer->bases[i] = bases[i];
}
}
std::unique_ptr<TpuEmbeddingEngineParametersData> Create(int num_tables) {
auto data = std::make_unique<TpuEmbeddingEngineParametersData>();
data->c_params.num_tables = num_tables;
for (int i = 0; i < 8; i++) {
data->vectors[i].resize(num_tables);
data->c_params.parameters[i] = data->vectors[i].data();
}
return data;
}
void Destroy(XLA_ShapeIndex* shape_index) { delete[] shape_index; }
void Destroy(SE_DeviceMemoryBase*) {}
void Destroy(XLA_Literal* c_literal) {
delete[] c_literal->buffers;
delete[] c_literal->sizes;
ApiConverter::Destroy(&c_literal->shape);
}
void Destroy(XLA_ShapedBuffer* c_buffer) {
ApiConverter::Destroy(&c_buffer->on_device_shape);
delete[] c_buffer->bases;
}
XLA_HloModule ToC(const xla::HloModule& module) {
XLA_HloModule c_module;
c_module.proto = stream_executor::tpu::SerializeProto(module.ToProto());
c_module.module_config = ApiConverter::ToC(module.config());
return c_module;
}
absl::StatusOr<std::unique_ptr<xla::HloModule>> FromC(
const XLA_HloModule& c_module) {
xla::HloModuleProto module_proto =
stream_executor::tpu::DeserializeProto<xla::HloModuleProto>(
c_module.proto);
return xla::HloModule::CreateFromProto(
module_proto, ApiConverter::FromC(c_module.module_config));
}
void Destroy(XLA_HloModule* c_module) {
stream_executor::tpu::SerializedProto_Free(c_module->proto);
Destroy(&c_module->module_config);
}
static xla::HloModuleConfig ConfigWithLayout(
const XLA_HloModuleConfig& se_config) {
xla::ShapeLayout result_layout(
FromC(&se_config.entry_computation_layout.result_layout));
xla::ComputationLayout layout(result_layout);
for (int i = 0; i < se_config.entry_computation_layout.parameter_count; ++i) {
layout.add_parameter_layout(xla::ShapeLayout(
FromC(&se_config.entry_computation_layout.parameter_layouts[i])));
}
return xla::HloModuleConfig(layout);
}
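// Serializes an HloModuleConfig, including SPMD partitioning flags, the static
// device assignment, debug options, and the entry computation layout.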
XLA_HloModuleConfig ToC(const xla::HloModuleConfig& config) {
XLA_HloModuleConfig hlo_config;
hlo_config.seed = config.seed();
hlo_config.launch_id = config.launch_id();
hlo_config.replica_count = config.replica_count();
hlo_config.num_partitions = config.num_partitions();
hlo_config.use_spmd_partitioning = config.use_spmd_partitioning();
hlo_config.use_auto_spmd_partitioning = config.use_auto_spmd_partitioning();
CreateVector(config.allow_spmd_sharding_propagation_to_parameters(),
&hlo_config.allow_spmd_sharding_propagation_to_parameters);
CreateVector(config.allow_spmd_sharding_propagation_to_output(),
&hlo_config.allow_spmd_sharding_propagation_to_output);
CreateVector(config.auto_spmd_partitioning_mesh_shape(),
&hlo_config.auto_spmd_partitioning_mesh_shape);
CreateVector(config.auto_spmd_partitioning_mesh_ids(),
&hlo_config.auto_spmd_partitioning_mesh_ids);
hlo_config.has_static_device_assignment =
config.has_static_device_assignment();
hlo_config.has_entry_computation_layout =
config.has_entry_computation_layout();
if (config.has_static_device_assignment()) {
xla::DeviceAssignmentProto dev_proto;
config.static_device_assignment().Serialize(&dev_proto);
hlo_config.static_device_assignment =
stream_executor::tpu::SerializeProto(dev_proto);
}
hlo_config.debug_options =
stream_executor::tpu::SerializeProto(config.debug_options());
if (config.has_entry_computation_layout()) {
const auto& layout = config.entry_computation_layout();
ApiConverter::ToC(layout.result_layout().shape(),
&hlo_config.entry_computation_layout.result_layout);
hlo_config.entry_computation_layout.parameter_layouts =
new XLA_Shape[layout.parameter_count()];
for (int i = 0; i < layout.parameter_count(); ++i) {
ApiConverter::ToC(
layout.parameter_layout(i).shape(),
&hlo_config.entry_computation_layout.parameter_layouts[i]);
}
hlo_config.entry_computation_layout.parameter_count =
layout.parameter_count();
}
return hlo_config;
}
xla::HloModuleConfig FromC(const XLA_HloModuleConfig& c_config) {
xla::HloModuleConfig config = c_config.has_entry_computation_layout
? ConfigWithLayout(c_config)
: xla::HloModuleConfig();
config.set_launch_id(c_config.launch_id);
config.set_seed(c_config.seed);
config.set_replica_count(c_config.replica_count);
config.set_num_partitions(c_config.num_partitions);
config.set_use_spmd_partitioning(c_config.use_spmd_partitioning);
config.set_use_auto_spmd_partitioning(c_config.use_auto_spmd_partitioning);
config.set_allow_spmd_sharding_propagation_to_parameters(
MakeSpan(c_config.allow_spmd_sharding_propagation_to_parameters));
config.set_allow_spmd_sharding_propagation_to_output(
MakeSpan(c_config.allow_spmd_sharding_propagation_to_output));
absl::Span<const int64_t> mesh_shape_span =
MakeSpan(c_config.auto_spmd_partitioning_mesh_shape);
config.set_auto_spmd_partitioning_mesh_shape(
std::vector<int64_t>(mesh_shape_span.begin(), mesh_shape_span.end()));
absl::Span<const int64_t> mesh_ids_span =
MakeSpan(c_config.auto_spmd_partitioning_mesh_ids);
config.set_auto_spmd_partitioning_mesh_ids(
std::vector<int64_t>(mesh_ids_span.begin(), mesh_ids_span.end()));
if (c_config.has_static_device_assignment) {
auto device_assignment = xla::DeviceAssignment::Deserialize(
stream_executor::tpu::DeserializeProto<xla::DeviceAssignmentProto>(
c_config.static_device_assignment));
config.set_static_device_assignment(
*(std::move(device_assignment).value()));
}
config.set_debug_options(
stream_executor::tpu::DeserializeProto<xla::DebugOptions>(
c_config.debug_options));
return config;
}
void Destroy(XLA_HloModuleConfig* c_config) {
for (auto i = 0; i < c_config->entry_computation_layout.parameter_count;
++i) {
ApiConverter::Destroy(
&c_config->entry_computation_layout.parameter_layouts[i]);
}
delete[] c_config->entry_computation_layout.parameter_layouts;
ApiConverter::Destroy(&c_config->entry_computation_layout.result_layout);
if (c_config->has_static_device_assignment) {
stream_executor::tpu::SerializedProto_Free(
c_config->static_device_assignment);
}
stream_executor::tpu::SerializedProto_Free(c_config->debug_options);
}
void Destroy(FloatList* float_list) {
if (float_list->size > TPU_C_API_MAX_INLINED) {
delete[] float_list->heap;
}
}
} | #include "xla/stream_executor/tpu/c_api_conversions.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
namespace ApiConverter {
namespace {
constexpr absl::string_view kHloString =
R"(
HloModule TupleCreate_module:
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)";
TEST(XlaTile, ToCInlined) {
std::vector<int64_t> tile_dimensions{2, 3, 4, 5};
xla::Tile cpp_tile(tile_dimensions);
XLA_Tile c_tile;
ToC(cpp_tile, &c_tile);
absl::Span<const int64_t> cpp_tile_dimensions = cpp_tile.dimensions();
ASSERT_EQ(cpp_tile_dimensions, tile_dimensions);
absl::Span<const int64_t> c_tile_dimensions = MakeSpan(c_tile.dimensions);
EXPECT_EQ(cpp_tile_dimensions, c_tile_dimensions);
Destroy(&c_tile);
}
TEST(XlaTile, ToCDynamic) {
std::vector<int64_t> tile_dimensions{2, 3, 4, 5, 6, 7, 8, 9};
xla::Tile cpp_tile(tile_dimensions);
XLA_Tile c_tile;
ToC(cpp_tile, &c_tile);
absl::Span<const int64_t> cpp_tile_dimensions = cpp_tile.dimensions();
ASSERT_EQ(cpp_tile_dimensions, tile_dimensions);
absl::Span<const int64_t> c_tile_dimensions = MakeSpan(c_tile.dimensions);
EXPECT_EQ(cpp_tile_dimensions, c_tile_dimensions);
Destroy(&c_tile);
}
TEST(XlaTile, FromCInlined) {
constexpr size_t kInlinedSize = 4;
Int64List tile_dimensions;
tile_dimensions.size = kInlinedSize;
for (int i = 0; i < kInlinedSize; ++i) {
tile_dimensions.inlined[i] = i + 2;
}
XLA_Tile c_tile{tile_dimensions};
xla::Tile cpp_tile = FromC(&c_tile);
auto cpp_dimensions = cpp_tile.dimensions();
EXPECT_EQ(cpp_dimensions.size(), kInlinedSize);
for (int i = 0; i < kInlinedSize; ++i) {
EXPECT_EQ(cpp_dimensions[i], i + 2);
}
Destroy(&c_tile);
}
TEST(XlaTile, FromCDynamic) {
constexpr size_t kDynamicSize = 8;
int64_t* dynamic = new int64_t[kDynamicSize];
for (int i = 0; i < kDynamicSize; ++i) {
dynamic[i] = i + 2;
}
Int64List tile_dimensions;
tile_dimensions.size = kDynamicSize;
tile_dimensions.heap = dynamic;
XLA_Tile c_tile{tile_dimensions};
xla::Tile cpp_tile = FromC(&c_tile);
auto cpp_dimensions = cpp_tile.dimensions();
EXPECT_EQ(cpp_dimensions.size(), kDynamicSize);
for (int i = 0; i < kDynamicSize; ++i) {
EXPECT_EQ(cpp_dimensions[i], i + 2);
}
Destroy(&c_tile);
}
namespace TestImpl {
void XlaLayout_ToC(const xla::Layout& cpp_layout) {
XLA_Layout c_layout;
ToC(cpp_layout, &c_layout);
absl::Span<const int64_t> cpp_minor_to_major = cpp_layout.minor_to_major();
absl::Span<const int64_t> c_minor_to_major =
MakeSpan(c_layout.minor_to_major);
EXPECT_EQ(cpp_minor_to_major, c_minor_to_major);
absl::Span<const int> c_dim_level_types = MakeSpan(c_layout.dim_level_types);
EXPECT_EQ(cpp_layout.dim_level_types_size(), c_dim_level_types.size());
for (int i = 0; i < c_dim_level_types.size(); ++i) {
EXPECT_EQ(static_cast<int>(cpp_layout.dim_level_type(i)),
c_dim_level_types[i]);
}
absl::Span<const int> c_dim_unique = MakeSpan(c_layout.dim_unique);
EXPECT_EQ(cpp_layout.dim_unique_size(), c_dim_unique.size());
for (int i = 0; i < c_dim_unique.size(); ++i) {
EXPECT_EQ(cpp_layout.dim_unique(i), static_cast<bool>(c_dim_unique[i]));
}
absl::Span<const int> c_dim_ordered = MakeSpan(c_layout.dim_ordered);
EXPECT_EQ(cpp_layout.dim_ordered_size(), c_dim_ordered.size());
for (int i = 0; i < c_dim_ordered.size(); ++i) {
EXPECT_EQ(cpp_layout.dim_ordered(i), static_cast<bool>(c_dim_ordered[i]));
}
absl::Span<const xla::Tile> cpp_tiles = cpp_layout.tiles();
TileList c_tiles = c_layout.tiles;
EXPECT_EQ(cpp_tiles.size(), c_tiles.size);
XLA_Tile* tile_base =
(c_tiles.size > TPU_C_API_MAX_INLINED) ? c_tiles.heap : c_tiles.inlined;
for (int i = 0; i < c_tiles.size; ++i) {
xla::Tile converted_c_tile = FromC(&tile_base[i]);
EXPECT_EQ(cpp_tiles[i], converted_c_tile);
}
EXPECT_EQ(cpp_layout.index_primitive_type(), c_layout.index_primitive_type);
EXPECT_EQ(cpp_layout.pointer_primitive_type(),
c_layout.pointer_primitive_type);
EXPECT_EQ(cpp_layout.element_size_in_bits(), c_layout.element_size_in_bits);
EXPECT_EQ(cpp_layout.memory_space(), c_layout.memory_space);
EXPECT_EQ(cpp_layout.dynamic_shape_metadata_prefix_bytes(),
c_layout.dynamic_shape_metadata_prefix_bytes);
Destroy(&c_layout);
}
}
TEST(XlaLayout, ToCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
xla::Layout cpp_layout = cpp_shape.layout();
TestImpl::XlaLayout_ToC(cpp_layout);
}
TEST(XlaLayout, ToCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
xla::Layout cpp_layout = cpp_shape.layout();
TestImpl::XlaLayout_ToC(cpp_layout);
}
TEST(XlaLayout, FromCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
xla::Layout in_layout = cpp_shape.layout();
XLA_Layout c_layout;
ToC(in_layout, &c_layout);
xla::Layout out_layout = FromC(&c_layout);
EXPECT_EQ(in_layout, out_layout);
Destroy(&c_layout);
}
TEST(XlaLayout, FromCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
xla::Layout in_layout = cpp_shape.layout();
XLA_Layout c_layout;
ToC(in_layout, &c_layout);
xla::Layout out_layout = FromC(&c_layout);
EXPECT_EQ(in_layout, out_layout);
Destroy(&c_layout);
}
TEST(XlaShape, ToCScalar) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
XLA_Shape c_shape;
ToC(cpp_shape, &c_shape);
EXPECT_EQ(cpp_shape.element_type(), c_shape.element_type);
absl::Span<const int64_t> cpp_dimensions = cpp_shape.dimensions();
absl::Span<const int64_t> c_dimensions = MakeSpan(c_shape.dimensions);
EXPECT_EQ(cpp_dimensions, c_dimensions);
absl::Span<const bool> cpp_dynamic_dimensions =
cpp_shape.dynamic_dimensions();
absl::Span<const bool> c_dynamic_dimensions =
MakeSpan(c_shape.dynamic_dimensions);
EXPECT_EQ(cpp_dynamic_dimensions, c_dynamic_dimensions);
int cpp_ntuple_shapes = cpp_shape.tuple_shapes_size();
int c_ntuple_shapes = c_shape.ntuple_shapes;
EXPECT_EQ(cpp_ntuple_shapes, c_ntuple_shapes);
EXPECT_EQ(cpp_ntuple_shapes, 0);
bool cpp_has_layout = cpp_shape.has_layout();
bool c_has_layout = c_shape.has_layout;
EXPECT_EQ(cpp_has_layout, c_has_layout);
Destroy(&c_shape);
}
TEST(XlaShape, ToCNested) {
xla::Shape cpp_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
XLA_Shape c_shape;
ToC(cpp_shape, &c_shape);
EXPECT_EQ(cpp_shape.element_type(), c_shape.element_type);
absl::Span<const int64_t> cpp_dimensions = cpp_shape.dimensions();
absl::Span<const int64_t> c_dimensions = MakeSpan(c_shape.dimensions);
EXPECT_EQ(cpp_dimensions, c_dimensions);
absl::Span<const bool> cpp_dynamic_dimensions =
cpp_shape.dynamic_dimensions();
absl::Span<const bool> c_dynamic_dimensions =
MakeSpan(c_shape.dynamic_dimensions);
EXPECT_EQ(cpp_dynamic_dimensions, c_dynamic_dimensions);
int cpp_ntuple_shapes = cpp_shape.tuple_shapes_size();
int c_ntuple_shapes = c_shape.ntuple_shapes;
EXPECT_EQ(cpp_ntuple_shapes, c_ntuple_shapes);
const std::vector<xla::Shape>& cpp_tuple_shapes = cpp_shape.tuple_shapes();
absl::Span<const XLA_Shape> c_tuple_shapes(c_shape.tuple_shapes,
c_ntuple_shapes);
for (int i = 0; i < c_ntuple_shapes; ++i) {
xla::Shape converted_c_shape = FromC(&c_tuple_shapes[i]);
EXPECT_EQ(cpp_tuple_shapes[i], converted_c_shape);
}
bool cpp_has_layout = cpp_shape.has_layout();
bool c_has_layout = c_shape.has_layout;
EXPECT_EQ(cpp_has_layout, c_has_layout);
if (c_has_layout) {
xla::Layout converted_c_layout = FromC(&c_shape.layout);
EXPECT_EQ(cpp_shape.layout(), converted_c_layout);
}
Destroy(&c_shape);
}
TEST(XlaShape, FromCScalar) {
xla::Shape in_shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
XLA_Shape c_shape;
ToC(in_shape, &c_shape);
xla::Shape out_shape = FromC(&c_shape);
EXPECT_EQ(in_shape, out_shape);
Destroy(&c_shape);
}
TEST(XlaShape, FromCNested) {
xla::Shape in_shape = xla::ShapeUtil::MakeShapeWithType<float>({4, 3, 2});
XLA_Shape c_shape;
ToC(in_shape, &c_shape);
xla::Shape out_shape = FromC(&c_shape);
EXPECT_EQ(in_shape, out_shape);
Destroy(&c_shape);
}
TEST(XlaHloModuleConfig, ToAndFromC) {
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_TRUE(hlo_module.ok());
xla::HloModule& cpp_module = *hlo_module.value();
xla::HloModuleConfig in_config = cpp_module.config();
XLA_HloModuleConfig c_config = ToC(in_config);
xla::HloModuleConfig out_config = FromC(c_config);
xla::HloModuleConfigProto in_config_proto = in_config.ToProto();
xla::HloModuleConfigProto out_config_proto = out_config.ToProto();
tsl::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
tsl::protobuf::util::MessageDifferencer::EQUIVALENT);
EXPECT_TRUE(diff.Equals(in_config_proto, out_config_proto));
Destroy(&c_config);
}
TEST(XlaHloModule, ToAndFromC) {
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_TRUE(hlo_module.ok());
xla::HloModule& in_module = *hlo_module.value();
XLA_HloModule c_module = ToC(in_module);
absl::StatusOr<std::unique_ptr<xla::HloModule>> out_module_ptr =
FromC(c_module);
ASSERT_TRUE(out_module_ptr.ok());
xla::HloModule& out_module = *out_module_ptr.value();
xla::HloModuleProtoWithConfig in_module_proto = in_module.ToProtoWithConfig();
xla::HloModuleProtoWithConfig out_module_proto =
out_module.ToProtoWithConfig();
tsl::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
tsl::protobuf::util::MessageDifferencer::EQUIVALENT);
const auto* ignore_unique_id =
xla::HloModuleProto::GetDescriptor()->FindFieldByName("id");
diff.IgnoreField(ignore_unique_id);
EXPECT_TRUE(diff.Compare(in_module_proto, out_module_proto));
Destroy(&c_module);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/tpu/c_api_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/tpu/c_api_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38bacd4d-da84-44e2-a7b8-67bd2578d95b | cpp | tensorflow/tensorflow | file_system | third_party/xla/third_party/tsl/tsl/platform/file_system.cc | tensorflow/core/platform/file_system_test.cc | #include "tsl/platform/file_system.h"
#include <sys/stat.h>
#include <algorithm>
#include <deque>
#include <string>
#include <utility>
#include <vector>
#include "tsl/platform/status.h"
#if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \
defined(PLATFORM_GOOGLE)
#include <fnmatch.h>
#else
#include "tsl/platform/regexp.h"
#endif
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
namespace tsl {
bool FileSystem::Match(const string& filename, const string& pattern) {
#if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \
defined(PLATFORM_GOOGLE)
return fnmatch(pattern.c_str(), filename.c_str(), FNM_PATHNAME) == 0;
#else
string regexp(pattern);
regexp = str_util::StringReplace(regexp, "*", "[^/]*", true);
regexp = str_util::StringReplace(regexp, "?", ".", true);
regexp = str_util::StringReplace(regexp, "(", "\\(", true);
regexp = str_util::StringReplace(regexp, ")", "\\)", true);
return RE2::FullMatch(filename, regexp);
#endif
}
string FileSystem::TranslateName(const string& name) const {
if (name.empty()) return name;
absl::string_view scheme, host, path;
this->ParseURI(name, &scheme, &host, &path);
if (path.empty()) return "/";
return this->CleanPath(path);
}
absl::Status FileSystem::IsDirectory(const string& name,
TransactionToken* token) {
TF_RETURN_IF_ERROR(FileExists(name));
FileStatistics stat;
TF_RETURN_IF_ERROR(Stat(name, &stat));
if (stat.is_directory) {
return absl::OkStatus();
}
return absl::Status(absl::StatusCode::kFailedPrecondition, "Not a directory");
}
absl::Status FileSystem::HasAtomicMove(const string& path,
bool* has_atomic_move) {
*has_atomic_move = true;
return absl::OkStatus();
}
absl::Status FileSystem::CanCreateTempFile(const std::string& fname,
bool* can_create_temp_file) {
*can_create_temp_file = true;
return absl::OkStatus();
}
void FileSystem::FlushCaches(TransactionToken* token) {}
bool FileSystem::FilesExist(const std::vector<string>& files,
TransactionToken* token,
std::vector<absl::Status>* status) {
bool result = true;
for (const auto& file : files) {
absl::Status s = FileExists(file);
result &= s.ok();
if (status != nullptr) {
status->push_back(s);
} else if (!result) {
return false;
}
}
return result;
}
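// Breadth-first traversal: files are deleted as they are encountered, then the
// collected directories are removed deepest-first; undeletable entries are
// counted rather than aborting the walk.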
absl::Status FileSystem::DeleteRecursively(const string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
CHECK_NOTNULL(undeleted_files);
CHECK_NOTNULL(undeleted_dirs);
*undeleted_files = 0;
*undeleted_dirs = 0;
absl::Status exists_status = FileExists(dirname);
if (!exists_status.ok()) {
(*undeleted_dirs)++;
return exists_status;
}
if (!IsDirectory(dirname).ok()) {
absl::Status delete_root_status = DeleteFile(dirname);
if (!delete_root_status.ok()) (*undeleted_files)++;
return delete_root_status;
}
std::deque<string> dir_q;
std::vector<string> dir_list;
dir_q.push_back(dirname);
absl::Status ret;
while (!dir_q.empty()) {
string dir = dir_q.front();
dir_q.pop_front();
dir_list.push_back(dir);
std::vector<string> children;
absl::Status s = GetChildren(dir, &children);
ret.Update(s);
if (!s.ok()) {
(*undeleted_dirs)++;
continue;
}
for (const string& child : children) {
const string child_path = this->JoinPath(dir, child);
if (IsDirectory(child_path).ok()) {
dir_q.push_back(child_path);
} else {
absl::Status del_status = DeleteFile(child_path);
ret.Update(del_status);
if (!del_status.ok()) {
(*undeleted_files)++;
}
}
}
}
std::reverse(dir_list.begin(), dir_list.end());
for (const string& dir : dir_list) {
absl::Status s = DeleteDir(dir);
ret.Update(s);
if (!s.ok()) {
(*undeleted_dirs)++;
}
}
return ret;
}
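// Walks up from |dirname| until an existing directory is found, then creates
// the missing components from the top down.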
absl::Status FileSystem::RecursivelyCreateDir(const string& dirname,
TransactionToken* token) {
absl::string_view scheme, host, remaining_dir;
this->ParseURI(dirname, &scheme, &host, &remaining_dir);
std::vector<absl::string_view> sub_dirs;
while (!remaining_dir.empty()) {
std::string current_entry = this->CreateURI(scheme, host, remaining_dir);
absl::Status exists_status = FileExists(current_entry);
if (exists_status.ok()) {
absl::Status directory_status = IsDirectory(current_entry);
if (directory_status.ok()) {
break;
} else if (directory_status.code() == absl::StatusCode::kUnimplemented) {
return directory_status;
} else {
return errors::FailedPrecondition(remaining_dir, " is not a directory");
}
}
if (exists_status.code() != error::Code::NOT_FOUND) {
return exists_status;
}
if (!absl::EndsWith(remaining_dir, "/")) {
sub_dirs.push_back(this->Basename(remaining_dir));
}
remaining_dir = this->Dirname(remaining_dir);
}
std::reverse(sub_dirs.begin(), sub_dirs.end());
string built_path(remaining_dir);
for (const absl::string_view sub_dir : sub_dirs) {
built_path = this->JoinPath(built_path, sub_dir);
absl::Status status = CreateDir(this->CreateURI(scheme, host, built_path));
if (!status.ok() && status.code() != absl::StatusCode::kAlreadyExists) {
return status;
}
}
return absl::OkStatus();
}
absl::Status FileSystem::CopyFile(const string& src, const string& target,
TransactionToken* token) {
return FileSystemCopyFile(this, src, this, target);
}
char FileSystem::Separator() const { return '/'; }
string FileSystem::JoinPathImpl(
std::initializer_list<absl::string_view> paths) {
string result;
for (absl::string_view path : paths) {
if (path.empty()) continue;
if (result.empty()) {
result = string(path);
continue;
}
if (result[result.size() - 1] == '/') {
if (this->IsAbsolutePath(path)) {
strings::StrAppend(&result, path.substr(1));
} else {
strings::StrAppend(&result, path);
}
} else {
if (this->IsAbsolutePath(path)) {
strings::StrAppend(&result, path);
} else {
strings::StrAppend(&result, "/", path);
}
}
}
return result;
}
std::pair<absl::string_view, absl::string_view> FileSystem::SplitPath(
absl::string_view uri) const {
absl::string_view scheme, host, path;
ParseURI(uri, &scheme, &host, &path);
if (path.empty()) {
return std::make_pair(absl::string_view(), absl::string_view());
}
size_t pos = path.rfind(this->Separator());
#ifdef PLATFORM_WINDOWS
size_t pos2 = path.rfind('/');
if (pos == string::npos) {
pos = pos2;
} else {
if (pos2 != string::npos) {
pos = pos > pos2 ? pos : pos2;
}
}
#endif
if (pos == absl::string_view::npos) {
if (host.empty()) {
return std::make_pair(absl::string_view(), path);
}
return std::make_pair(
absl::string_view(uri.data(), host.end() - uri.begin()), path);
}
if (pos == 0) {
return std::make_pair(
absl::string_view(uri.data(), path.begin() + 1 - uri.begin()),
absl::string_view(path.data() + 1, path.size() - 1));
}
return std::make_pair(
absl::string_view(uri.data(), path.begin() + pos - uri.begin()),
absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
bool FileSystem::IsAbsolutePath(absl::string_view path) const {
return !path.empty() && path[0] == '/';
}
absl::string_view FileSystem::Dirname(absl::string_view path) const {
return this->SplitPath(path).first;
}
absl::string_view FileSystem::Basename(absl::string_view path) const {
return this->SplitPath(path).second;
}
absl::string_view FileSystem::Extension(absl::string_view path) const {
absl::string_view basename = this->Basename(path);
size_t pos = basename.rfind('.');
if (pos == absl::string_view::npos) {
return absl::string_view(path.data() + path.size(), 0);
} else {
return absl::string_view(path.data() + pos + 1, path.size() - (pos + 1));
}
}
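// Collapses duplicate slashes and resolves "." and ".." components in place,
// preserving a leading '/' for absolute paths.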
string FileSystem::CleanPath(absl::string_view unclean_path) const {
string path(unclean_path);
const char* src = path.c_str();
string::iterator dst = path.begin();
const bool is_absolute_path = *src == '/';
if (is_absolute_path) {
*dst++ = *src++;
while (*src == '/') ++src;
}
string::const_iterator backtrack_limit = dst;
while (*src) {
bool parsed = false;
if (src[0] == '.') {
if (src[1] == '/' || !src[1]) {
if (*++src) {
++src;
}
parsed = true;
} else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
src += 2;
if (dst != backtrack_limit) {
for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
}
} else if (!is_absolute_path) {
src -= 2;
*dst++ = *src++;
*dst++ = *src++;
if (*src) {
*dst++ = *src;
}
backtrack_limit = dst;
}
if (*src) {
++src;
}
parsed = true;
}
}
if (!parsed) {
while (*src && *src != '/') {
*dst++ = *src++;
}
if (*src) {
*dst++ = *src++;
}
}
while (*src == '/') {
++src;
}
}
string::difference_type path_length = dst - path.begin();
if (path_length != 0) {
if (path_length > 1 && path[path_length - 1] == '/') {
--path_length;
}
path.resize(path_length);
} else {
path.assign(1, '.');
}
return path;
}
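// Splits a URI of the form scheme://host/path into its parts; if no scheme is
// present the whole input is returned as the path.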
void FileSystem::ParseURI(absl::string_view remaining,
absl::string_view* scheme, absl::string_view* host,
absl::string_view* path) const {
if (!strings::Scanner(remaining)
.One(strings::Scanner::LETTER)
.Many(strings::Scanner::LETTER_DIGIT_DOT)
.StopCapture()
.OneLiteral(":
.GetResult(&remaining, scheme)) {
*scheme = absl::string_view();
*host = absl::string_view();
*path = remaining;
return;
}
if (!strings::Scanner(remaining).ScanUntil('/').GetResult(&remaining, host)) {
*host = remaining;
*path = absl::string_view();
return;
}
*path = remaining;
}
string FileSystem::CreateURI(absl::string_view scheme, absl::string_view host,
absl::string_view path) const {
if (scheme.empty()) {
return string(path);
}
return strings::StrCat(scheme, ":
}
std::string FileSystem::DecodeTransaction(const TransactionToken* token) {
if (token) {
std::stringstream oss;
oss << "Token= " << token->token << ", Owner=" << token->owner;
return oss.str();
}
return "No Transaction";
}
} | #include "tensorflow/core/platform/file_system.h"
#include <sys/stat.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
static const char* const kPrefix = "ipfs:
class InterPlanetaryFileSystem : public NullFileSystem {
public:
TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
Status FileExists(const string& fname, TransactionToken* token) override {
string parsed_path;
ParsePath(fname, &parsed_path);
if (BodyExists(parsed_path)) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kNotFound, "File does not exist");
}
Status CreateDir(const string& dirname, TransactionToken* token) override {
string parsed_path;
ParsePath(dirname, &parsed_path);
if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) {
return Status(absl::StatusCode::kAlreadyExists,
"dirname already exists.");
}
std::vector<string> split_path = str_util::Split(parsed_path, '/');
if (split_path.size() > 3) {
return Status(absl::StatusCode::kInvalidArgument, "Bad dirname");
}
if (split_path.empty()) {
return absl::OkStatus();
}
if (split_path.size() == 1) {
celestial_bodies_[""].insert(parsed_path);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
if (split_path.size() == 2) {
if (!BodyExists(split_path[0])) {
return Status(absl::StatusCode::kFailedPrecondition,
"Base dir not created");
}
celestial_bodies_[split_path[0]].insert(split_path[1]);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
if (split_path.size() == 3) {
const string& parent_path = this->JoinPath(split_path[0], split_path[1]);
if (!BodyExists(parent_path)) {
return Status(absl::StatusCode::kFailedPrecondition,
"Base dir not created");
}
celestial_bodies_[parent_path].insert(split_path[2]);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Failed to create");
}
Status IsDirectory(const string& dirname, TransactionToken* token) override {
string parsed_path;
ParsePath(dirname, &parsed_path);
if (parsed_path == "evil_directory") {
LOG(FATAL) << "evil_directory cannot be accessed";
}
std::vector<string> split_path = str_util::Split(parsed_path, '/');
if (split_path.size() > 2) {
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
Status GetChildren(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
TF_RETURN_IF_ERROR(IsDirectory(dir, nullptr));
string parsed_path;
ParsePath(dir, &parsed_path);
result->insert(result->begin(), celestial_bodies_[parsed_path].begin(),
celestial_bodies_[parsed_path].end());
return absl::OkStatus();
}
private:
bool BodyExists(const string& name) {
return celestial_bodies_.find(name) != celestial_bodies_.end();
}
void ParsePath(const string& name, string* parsed_path) {
StringPiece scheme, host, path;
this->ParseURI(name, &scheme, &host, &path);
ASSERT_EQ(scheme, "ipfs");
ASSERT_EQ(host, "solarsystem");
absl::ConsumePrefix(&path, "/");
*parsed_path = string(path);
}
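  // In-memory "directory tree": the empty key lists the top-level planets and
  // each planet maps to the set of its children (moons).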
std::map<string, std::set<string>> celestial_bodies_ = {
std::pair<string, std::set<string>>(
"", {"Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn",
"Uranus", "Neptune"}),
std::pair<string, std::set<string>>("Mercury", {}),
std::pair<string, std::set<string>>("Venus", {}),
std::pair<string, std::set<string>>("Earth", {"Moon"}),
std::pair<string, std::set<string>>("Mars", {}),
std::pair<string, std::set<string>>("Jupiter",
{"Europa", "Io", "Ganymede"}),
std::pair<string, std::set<string>>("Saturn", {}),
std::pair<string, std::set<string>>("Uranus", {}),
std::pair<string, std::set<string>>("Neptune", {}),
std::pair<string, std::set<string>>("Earth/Moon", {}),
std::pair<string, std::set<string>>("Jupiter/Europa", {}),
std::pair<string, std::set<string>>("Jupiter/Io", {}),
std::pair<string, std::set<string>>("Jupiter/Ganymede", {})};
};
string Match(InterPlanetaryFileSystem* ipfs, const string& suffix_pattern) {
std::vector<string> results;
Status s = ipfs->GetMatchingPaths(ipfs->JoinPath(kPrefix, suffix_pattern),
nullptr, &results);
if (!s.ok()) {
return s.ToString();
} else {
std::vector<StringPiece> trimmed_results;
std::sort(results.begin(), results.end());
for (const string& result : results) {
StringPiece trimmed_result(result);
EXPECT_TRUE(
absl::ConsumePrefix(&trimmed_result, strings::StrCat(kPrefix, "/")));
trimmed_results.push_back(trimmed_result);
}
return absl::StrJoin(trimmed_results, ",");
}
}
TEST(InterPlanetaryFileSystemTest, IPFSMatch) {
InterPlanetaryFileSystem ipfs;
EXPECT_EQ(Match(&ipfs, "thereisnosuchfile"), "");
EXPECT_EQ(Match(&ipfs, "*"),
"Earth,Jupiter,Mars,Mercury,Neptune,Saturn,Uranus,Venus");
EXPECT_EQ(Match(&ipfs, "Jupiter*"),
"Earth/Moon,Jupiter/Europa,Jupiter/Ganymede,Jupiter/Io");
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet0"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet1"), nullptr));
EXPECT_EQ(Match(&ipfs, "Planet[0-1]"), "Planet0,Planet1");
EXPECT_EQ(Match(&ipfs, "Planet?"), "Planet0,Planet1");
}
TEST(InterPlanetaryFileSystemTest, MatchSimple) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-00"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-0a"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-01"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-aaa"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-*"), "match-00,match-01,match-0a,match-aaa");
EXPECT_EQ(Match(&ipfs, "match-0[0-9]"), "match-00,match-01");
EXPECT_EQ(Match(&ipfs, "match-?[0-9]"), "match-00,match-01");
EXPECT_EQ(Match(&ipfs, "match-?a*"), "match-0a,match-aaa");
EXPECT_EQ(Match(&ipfs, "match-??"), "match-00,match-01,match-0a");
}
TEST(InterPlanetaryFileSystemTest, MatchOnlyNeeded) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "abcd"), nullptr));
TF_EXPECT_OK(
ipfs.CreateDir(ipfs.JoinPath(kPrefix, "evil_directory"), nullptr));
EXPECT_EQ(Match(&ipfs, "abcd"), "abcd");
}
TEST(InterPlanetaryFileSystemTest, MatchDirectory) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-0a/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-aaa/abc/x"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-*/abc/x"),
"match-00/abc/x,match-01/abc/x,match-0a/abc/x,match-aaa/abc/x");
EXPECT_EQ(Match(&ipfs, "match-0[0-9]/abc/x"),
"match-00/abc/x,match-01/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?[0-9]/abc/x"),
"match-00/abc/x,match-01/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?a*/abc/x"), "match-0a/abc/x,match-aaa/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?[^a]/abc/x"), "match-00/abc/x,match-01/abc/x");
}
TEST(InterPlanetaryFileSystemTest, MatchMultipleWildcards) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/00"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/01"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/09"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/00"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/04"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/10"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-02/abc/00"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-0[0-1]/abc/0[0-8]"),
"match-00/abc/00,match-00/abc/01,match-01/abc/00,match-01/abc/04");
}
TEST(InterPlanetaryFileSystemTest, RecursivelyCreateAlreadyExistingDir) {
InterPlanetaryFileSystem ipfs;
const string dirname = ipfs.JoinPath(kPrefix, "match-00/abc/00");
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(dirname));
}
TEST(InterPlanetaryFileSystemTest, HasAtomicMove) {
InterPlanetaryFileSystem ipfs;
const string dirname = io::JoinPath(kPrefix, "match-00/abc/00");
bool has_atomic_move;
TF_EXPECT_OK(ipfs.HasAtomicMove(dirname, &has_atomic_move));
EXPECT_EQ(has_atomic_move, true);
}
TEST(InterPlanetaryFileSystemTest, CanCreateTempFile) {
InterPlanetaryFileSystem ipfs;
const string dirname = io::JoinPath(kPrefix, "match-00/abc/00");
bool can_create_temp_file;
TF_EXPECT_OK(ipfs.CanCreateTempFile(dirname, &can_create_temp_file));
EXPECT_EQ(can_create_temp_file, true);
}
class TestFileSystem : public NullFileSystem {
public:
Status IsDirectory(const string& dirname, TransactionToken* token) override {
if (dirname == "." || dirname.empty()) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
Status GetChildren(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
if (dir == "." || dir.empty()) {
result->push_back("test");
}
return absl::OkStatus();
}
};
TEST(TestFileSystemTest, RootDirectory) {
TestFileSystem fs;
std::vector<string> results;
auto ret = fs.GetMatchingPaths("./te*", nullptr, &results);
EXPECT_EQ(1, results.size());
EXPECT_EQ("./test", results[0]);
ret = fs.GetMatchingPaths("te*", nullptr, &results);
EXPECT_EQ(1, results.size());
EXPECT_EQ("./test", results[0]);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/file_system.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a284c9a-cb16-42f0-bc10-a7d4c1d598ae | cpp | tensorflow/tensorflow | str_util | third_party/xla/third_party/tsl/tsl/platform/str_util.cc | third_party/xla/third_party/tsl/tsl/platform/str_util_test.cc | #include "tsl/platform/str_util.h"
#include <cctype>
#include <cstdint>
#include <string>
#include "absl/strings/ascii.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/stringpiece.h"
namespace tsl {
namespace str_util {
size_t RemoveLeadingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripLeadingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveTrailingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripTrailingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveWhitespaceContext(absl::string_view* text) {
absl::string_view new_text = absl::StripAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
bool ConsumeLeadingDigits(absl::string_view* s, uint64_t* val) {
const char* p = s->data();
const char* limit = p + s->size();
uint64_t v = 0;
while (p < limit) {
const char c = *p;
if (c < '0' || c > '9') break;
uint64_t new_v = (v * 10) + (c - '0');
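    // If v * 10 overflowed, new_v wraps around and ends up much smaller than
    // v (any non-overflowing result is at least 8 * v), so reject the input.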
if (new_v / 8 < v) {
return false;
}
v = new_v;
p++;
}
if (p > s->data()) {
s->remove_prefix(p - s->data());
*val = v;
return true;
} else {
return false;
}
}
bool ConsumeNonWhitespace(absl::string_view* s, absl::string_view* val) {
const char* p = s->data();
const char* limit = p + s->size();
while (p < limit) {
const char c = *p;
if (isspace(c)) break;
p++;
}
const size_t n = p - s->data();
if (n > 0) {
*val = absl::string_view(s->data(), n);
s->remove_prefix(n);
return true;
} else {
*val = absl::string_view();
return false;
}
}
void TitlecaseString(string* s, absl::string_view delimiters) {
bool upper = true;
for (string::iterator ss = s->begin(); ss != s->end(); ++ss) {
if (upper) {
*ss = toupper(*ss);
}
upper = (delimiters.find(*ss) != absl::string_view::npos);
}
}
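// Replaces occurrences of `oldsub` in `s` with `newsub` (all of them when
// `replace_all` is true). The scan resumes after the inserted text so
// replacements are never rescanned, and an empty `oldsub` still advances the
// position to avoid an infinite loop.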
string StringReplace(absl::string_view s, absl::string_view oldsub,
absl::string_view newsub, bool replace_all) {
string res(s);
size_t pos = 0;
while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) {
res.replace(pos, oldsub.size(), newsub.data(), newsub.size());
pos += newsub.size();
if (oldsub.empty()) {
pos++;
}
if (!replace_all) {
break;
}
}
return res;
}
size_t Strnlen(const char* str, const size_t string_max_len) {
size_t len = 0;
while (len < string_max_len && str[len] != '\0') {
++len;
}
return len;
}
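// Converts a name such as "HiThere" into an argument-definition-friendly
// "hi_there": leading non-alphabetic characters are dropped, an underscore is
// inserted before interior capitals, letters are lowercased, and any other
// non-alphanumeric character becomes an underscore.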
string ArgDefCase(absl::string_view s) {
const size_t n = s.size();
size_t extra_us = 0;
size_t to_skip = 0;
for (size_t i = 0; i < n; ++i) {
if (i == to_skip && !isalpha(s[i])) {
++to_skip;
continue;
}
if (isupper(s[i]) && i != to_skip && i > 0 && isalnum(s[i - 1])) {
++extra_us;
}
}
string result(n + extra_us - to_skip, '_');
for (size_t i = to_skip, j = 0; i < n; ++i, ++j) {
DCHECK_LT(j, result.size());
char c = s[i];
if (isalnum(c)) {
if (isupper(c)) {
if (i != to_skip) {
DCHECK_GT(j, 0);
if (result[j - 1] != '_') ++j;
}
result[j] = tolower(c);
} else {
result[j] = c;
}
}
}
return result;
}
}
} | #include "tsl/platform/str_util.h"
#include <vector>
#include "tsl/platform/test.h"
namespace tsl {
TEST(CEscape, Basic) {
EXPECT_EQ(absl::CEscape("hello"), "hello");
EXPECT_EQ(absl::CEscape("hello\n"), "hello\\n");
EXPECT_EQ(absl::CEscape("hello\r"), "hello\\r");
EXPECT_EQ(absl::CEscape("\t\r\"'"), "\\t\\r\\\"\\'");
EXPECT_EQ(absl::CEscape("\320hi\200"), "\\320hi\\200");
}
string ExpectCUnescapeSuccess(absl::string_view source) {
string dest;
string error;
EXPECT_TRUE(absl::CUnescape(source, &dest, &error)) << error;
return dest;
}
TEST(CUnescape, Basic) {
EXPECT_EQ("hello", ExpectCUnescapeSuccess("hello"));
EXPECT_EQ("hello\n", ExpectCUnescapeSuccess("hello\\n"));
EXPECT_EQ("hello\r", ExpectCUnescapeSuccess("hello\\r"));
EXPECT_EQ("\t\r\"'", ExpectCUnescapeSuccess("\\t\\r\\\"\\'"));
EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200"));
}
TEST(CUnescape, HandlesCopyOnWriteStrings) {
string dest = "hello";
string read = dest;
string error;
absl::string_view source = "llohe";
EXPECT_TRUE(absl::CUnescape(source, &dest, &error));
EXPECT_EQ("hello", read);
}
TEST(StripTrailingWhitespace, Basic) {
string test;
test = "hello";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "hello");
test = "foo ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "foo");
test = " ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = "";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = " abc\t";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, " abc");
}
TEST(RemoveLeadingWhitespace, Basic) {
string text = " \t \n \r Quick\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 11);
EXPECT_EQ(data, absl::string_view("Quick\t"));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick\t"));
}
TEST(RemoveLeadingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveTrailingWhitespace, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 2);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
}
TEST(RemoveTrailingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveWhitespaceContext, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 13);
EXPECT_EQ(data, absl::string_view("Quick"));
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick"));
text = "";
data = text;
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
void TestConsumeLeadingDigits(absl::string_view s, int64_t expected,
absl::string_view remaining) {
uint64 v;
absl::string_view input(s);
if (str_util::ConsumeLeadingDigits(&input, &v)) {
EXPECT_EQ(v, static_cast<uint64>(expected));
EXPECT_EQ(input, remaining);
} else {
EXPECT_LT(expected, 0);
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeLeadingDigits, Basic) {
using str_util::ConsumeLeadingDigits;
TestConsumeLeadingDigits("123", 123, "");
TestConsumeLeadingDigits("a123", -1, "a123");
TestConsumeLeadingDigits("9_", 9, "_");
TestConsumeLeadingDigits("11111111111xyz", 11111111111ll, "xyz");
TestConsumeLeadingDigits("1111111111111111111111111111111xyz", -1,
"1111111111111111111111111111111xyz");
TestConsumeLeadingDigits("18446744073709551616xyz", -1,
"18446744073709551616xyz");
TestConsumeLeadingDigits("18446744073709551615xyz", 18446744073709551615ull,
"xyz");
TestConsumeLeadingDigits("184467440737095516159yz", -1,
"184467440737095516159yz");
}
void TestConsumeNonWhitespace(absl::string_view s, absl::string_view expected,
absl::string_view remaining) {
absl::string_view v;
absl::string_view input(s);
if (str_util::ConsumeNonWhitespace(&input, &v)) {
EXPECT_EQ(v, expected);
EXPECT_EQ(input, remaining);
} else {
EXPECT_EQ(expected, "");
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeNonWhitespace, Basic) {
TestConsumeNonWhitespace("", "", "");
TestConsumeNonWhitespace(" ", "", " ");
TestConsumeNonWhitespace("abc", "abc", "");
TestConsumeNonWhitespace("abc ", "abc", " ");
}
TEST(ConsumePrefix, Basic) {
string s("abcdef");
absl::string_view input(s);
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdefg"));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abce"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, ""));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdeg"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcdef"));
EXPECT_EQ(input, "");
input = s;
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcde"));
EXPECT_EQ(input, "f");
}
TEST(StripPrefix, Basic) {
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdefg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abce"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", ""), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdeg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdef"), "");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcde"), "f");
}
TEST(JoinStrings, Basic) {
std::vector<string> s;
s = {"hi"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi");
s = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi there strings");
std::vector<absl::string_view> sp;
sp = {"hi"};
EXPECT_EQ(absl::StrJoin(sp, ",,"), "hi");
sp = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(sp, "--"), "hi--there--strings");
}
TEST(JoinStrings, Join3) {
std::vector<string> s;
s = {"hi"};
auto l1 = [](string* out, string s) { *out += s; };
EXPECT_EQ(str_util::Join(s, " ", l1), "hi");
s = {"hi", "there", "strings"};
auto l2 = [](string* out, string s) { *out += s[0]; };
EXPECT_EQ(str_util::Join(s, " ", l2), "h t s");
}
TEST(Split, Basic) {
EXPECT_TRUE(str_util::Split("", ',').empty());
EXPECT_EQ(absl::StrJoin(str_util::Split("a", ','), "|"), "a");
EXPECT_EQ(absl::StrJoin(str_util::Split(",", ','), "|"), "|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,b,c", ','), "|"), "a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,,,b,,c,", ','), "|"), "a|||b||c|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a!,!b,!c,", ",!"), "|"),
"a|||b||c|");
EXPECT_EQ(absl::StrJoin(
str_util::Split("a,,,b,,c,", ',', str_util::SkipEmpty()), "|"),
"a|b|c");
EXPECT_EQ(
absl::StrJoin(
str_util::Split("a, ,b,,c,", ',', str_util::SkipWhitespace()), "|"),
"a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a. !b,;c,", ".,;!",
str_util::SkipWhitespace()),
"|"),
"a|b|c");
}
TEST(Lowercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToLower(""));
EXPECT_EQ("hello", absl::AsciiStrToLower("hello"));
EXPECT_EQ("hello world", absl::AsciiStrToLower("Hello World"));
}
TEST(Uppercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToUpper(""));
EXPECT_EQ("HELLO", absl::AsciiStrToUpper("hello"));
EXPECT_EQ("HELLO WORLD", absl::AsciiStrToUpper("Hello World"));
}
TEST(SnakeCase, Basic) {
EXPECT_EQ("", str_util::ArgDefCase(""));
EXPECT_EQ("", str_util::ArgDefCase("!"));
EXPECT_EQ("", str_util::ArgDefCase("5"));
EXPECT_EQ("", str_util::ArgDefCase("!:"));
EXPECT_EQ("", str_util::ArgDefCase("5-5"));
EXPECT_EQ("", str_util::ArgDefCase("_!"));
EXPECT_EQ("", str_util::ArgDefCase("_5"));
EXPECT_EQ("a", str_util::ArgDefCase("_a"));
EXPECT_EQ("a", str_util::ArgDefCase("_A"));
EXPECT_EQ("i", str_util::ArgDefCase("I"));
EXPECT_EQ("i", str_util::ArgDefCase("i"));
EXPECT_EQ("i_", str_util::ArgDefCase("I%"));
EXPECT_EQ("i_", str_util::ArgDefCase("i%"));
EXPECT_EQ("i", str_util::ArgDefCase("%I"));
EXPECT_EQ("i", str_util::ArgDefCase("-i"));
EXPECT_EQ("i", str_util::ArgDefCase("3i"));
EXPECT_EQ("i", str_util::ArgDefCase("32i"));
EXPECT_EQ("i3", str_util::ArgDefCase("i3"));
EXPECT_EQ("i_a3", str_util::ArgDefCase("i_A3"));
EXPECT_EQ("i_i", str_util::ArgDefCase("II"));
EXPECT_EQ("i_i", str_util::ArgDefCase("I_I"));
EXPECT_EQ("i__i", str_util::ArgDefCase("I__I"));
EXPECT_EQ("i_i_32", str_util::ArgDefCase("II-32"));
EXPECT_EQ("ii_32", str_util::ArgDefCase("Ii-32"));
EXPECT_EQ("hi_there", str_util::ArgDefCase("HiThere"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi!Hi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("HiHi"));
EXPECT_EQ("hihi", str_util::ArgDefCase("Hihi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi_Hi"));
}
TEST(TitlecaseString, Basic) {
string s = "sparse_lookup";
str_util::TitlecaseString(&s, "_");
ASSERT_EQ(s, "Sparse_Lookup");
s = "sparse_lookup";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Sparse_lookup");
s = "dense";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Dense");
}
TEST(StringReplace, Basic) {
EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
true));
}
TEST(StringReplace, OnlyFirst) {
EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
false));
}
TEST(StringReplace, IncreaseLength) {
EXPECT_EQ("a b c",
str_util::StringReplace("abc", "b", " b ", true));
}
TEST(StringReplace, IncreaseLengthMultipleMatches) {
EXPECT_EQ("a b b c",
str_util::StringReplace("abbc", "b", " b ", true));
}
TEST(StringReplace, NoChange) {
EXPECT_EQ("abc",
str_util::StringReplace("abc", "d", "X", true));
}
TEST(StringReplace, EmptyStringReplaceFirst) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", false));
}
TEST(StringReplace, EmptyStringReplaceAll) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", true));
}
TEST(Strnlen, Basic) {
EXPECT_EQ(0, str_util::Strnlen("ab", 0));
EXPECT_EQ(1, str_util::Strnlen("a", 1));
EXPECT_EQ(2, str_util::Strnlen("abcd", 2));
EXPECT_EQ(3, str_util::Strnlen("abc", 10));
EXPECT_EQ(4, str_util::Strnlen("a \t\n", 10));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/str_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/str_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4be1bae-1373-4467-bcc5-68d232ece85f | cpp | tensorflow/tensorflow | errors | third_party/xla/third_party/tsl/tsl/platform/errors.cc | third_party/xla/third_party/tsl/tsl/platform/errors_test.cc | #include "tsl/platform/errors.h"
#include <errno.h>
#include <string.h>
#include "tsl/platform/status.h"
#include "tsl/platform/strcat.h"
namespace tsl {
namespace errors {
namespace {
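// Maps a POSIX errno value onto the closest canonical absl::StatusCode
// bucket; anything unrecognized falls through to kUnknown. Used by IOError()
// below to turn an errno into a Status.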
absl::StatusCode ErrnoToCode(int err_number) {
absl::StatusCode code;
switch (err_number) {
case 0:
code = absl::StatusCode::kOk;
break;
case EINVAL:
case ENAMETOOLONG:
case E2BIG:
case EDESTADDRREQ:
case EDOM:
case EFAULT:
case EILSEQ:
case ENOPROTOOPT:
case ENOSTR:
case ENOTSOCK:
case ENOTTY:
case EPROTOTYPE:
case ESPIPE:
code = absl::StatusCode::kInvalidArgument;
break;
case ETIMEDOUT:
case ETIME:
code = absl::StatusCode::kDeadlineExceeded;
break;
case ENODEV:
case ENOENT:
case ENXIO:
case ESRCH:
code = absl::StatusCode::kNotFound;
break;
case EEXIST:
case EADDRNOTAVAIL:
case EALREADY:
code = absl::StatusCode::kAlreadyExists;
break;
case EPERM:
case EACCES:
case EROFS:
code = absl::StatusCode::kPermissionDenied;
break;
case ENOTEMPTY:
case EISDIR:
case ENOTDIR:
case EADDRINUSE:
case EBADF:
case EBUSY:
case ECHILD:
case EISCONN:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ENOTBLK:
#endif
case ENOTCONN:
case EPIPE:
#if !defined(_WIN32)
case ESHUTDOWN:
#endif
case ETXTBSY:
code = absl::StatusCode::kFailedPrecondition;
break;
case ENOSPC:
#if !defined(_WIN32)
case EDQUOT:
#endif
case EMFILE:
case EMLINK:
case ENFILE:
case ENOBUFS:
case ENODATA:
case ENOMEM:
case ENOSR:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EUSERS:
#endif
code = absl::StatusCode::kResourceExhausted;
break;
case EFBIG:
case EOVERFLOW:
case ERANGE:
code = absl::StatusCode::kOutOfRange;
break;
case ENOSYS:
case ENOTSUP:
case EAFNOSUPPORT:
#if !defined(_WIN32)
case EPFNOSUPPORT:
#endif
case EPROTONOSUPPORT:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ESOCKTNOSUPPORT:
#endif
case EXDEV:
code = absl::StatusCode::kUnimplemented;
break;
case EAGAIN:
case ECONNREFUSED:
case ECONNABORTED:
case ECONNRESET:
case EINTR:
#if !defined(_WIN32)
case EHOSTDOWN:
#endif
case EHOSTUNREACH:
case ENETDOWN:
case ENETRESET:
case ENETUNREACH:
case ENOLCK:
case ENOLINK:
#if !(defined(__APPLE__) || defined(__FreeBSD__) || defined(_WIN32) || \
defined(__HAIKU__))
case ENONET:
#endif
code = absl::StatusCode::kUnavailable;
break;
case EDEADLK:
#if !defined(_WIN32)
case ESTALE:
#endif
code = absl::StatusCode::kAborted;
break;
case ECANCELED:
code = absl::StatusCode::kCancelled;
break;
case EBADMSG:
case EIDRM:
case EINPROGRESS:
case EIO:
case ELOOP:
case ENOEXEC:
case ENOMSG:
case EPROTO:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EREMOTE:
#endif
code = absl::StatusCode::kUnknown;
break;
default: {
code = absl::StatusCode::kUnknown;
break;
}
}
return code;
}
}
absl::Status IOError(const string& context, int err_number) {
auto code = ErrnoToCode(err_number);
return absl::Status(code,
strings::StrCat(context, "; ", strerror(err_number)));
}
bool IsAborted(const absl::Status& status) {
return status.code() == tsl::error::Code::ABORTED;
}
bool IsAlreadyExists(const absl::Status& status) {
return status.code() == tsl::error::Code::ALREADY_EXISTS;
}
bool IsCancelled(const absl::Status& status) {
return status.code() == tsl::error::Code::CANCELLED;
}
bool IsDataLoss(const absl::Status& status) {
return status.code() == tsl::error::Code::DATA_LOSS;
}
bool IsDeadlineExceeded(const absl::Status& status) {
return status.code() == tsl::error::Code::DEADLINE_EXCEEDED;
}
bool IsFailedPrecondition(const absl::Status& status) {
return status.code() == tsl::error::Code::FAILED_PRECONDITION;
}
bool IsInternal(const absl::Status& status) {
return status.code() == tsl::error::Code::INTERNAL;
}
bool IsInvalidArgument(const absl::Status& status) {
return status.code() == tsl::error::Code::INVALID_ARGUMENT;
}
bool IsNotFound(const absl::Status& status) {
return status.code() == tsl::error::Code::NOT_FOUND;
}
bool IsOutOfRange(const absl::Status& status) {
return status.code() == tsl::error::Code::OUT_OF_RANGE;
}
bool IsPermissionDenied(const absl::Status& status) {
return status.code() == tsl::error::Code::PERMISSION_DENIED;
}
bool IsResourceExhausted(const absl::Status& status) {
return status.code() == tsl::error::Code::RESOURCE_EXHAUSTED;
}
bool IsUnauthenticated(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAUTHENTICATED;
}
bool IsUnavailable(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAVAILABLE;
}
bool IsUnimplemented(const absl::Status& status) {
return status.code() == tsl::error::Code::UNIMPLEMENTED;
}
bool IsUnknown(const absl::Status& status) {
return status.code() == tsl::error::Code::UNKNOWN;
}
}
} | #include "tsl/platform/errors.h"
#include "absl/status/status.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(AppendToMessageTest, PayloadsAreCopied) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord("payload_value"));
errors::AppendToMessage(&status, "Appended Message");
EXPECT_EQ(status.message(), "Aborted Error Message\n\tAppended Message");
EXPECT_EQ(status.GetPayload("payload_key"), absl::Cord("payload_value"));
}
TEST(Status, GetAllPayloads) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
auto payloads_error_status = errors::GetPayloads(s_error);
ASSERT_EQ(payloads_error_status.size(), 1);
ASSERT_EQ(payloads_error_status["Error key"], "foo");
absl::Status s_ok = absl::Status();
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, OKStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_ok, errors::GetPayloads(s_error));
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, ErrorStatusInsertPayloadsFromOKStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_error, errors::GetPayloads(s_ok));
ASSERT_EQ(s_error.GetPayload("Error key"), "foo");
}
TEST(Status, ErrorStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error1(absl::StatusCode::kInternal, "Error message");
s_error1.SetPayload("Error key 1", absl::Cord("foo"));
s_error1.SetPayload("Error key 2", absl::Cord("bar"));
absl::Status s_error2(absl::StatusCode::kInternal, "Error message");
s_error2.SetPayload("Error key", absl::Cord("bar"));
ASSERT_EQ(s_error2.GetPayload("Error key"), "bar");
errors::InsertPayloads(s_error2, errors::GetPayloads(s_error1));
ASSERT_EQ(s_error2.GetPayload("Error key 1"), "foo");
ASSERT_EQ(s_error2.GetPayload("Error key 2"), "bar");
auto payloads_error_status = errors::GetPayloads(s_error2);
ASSERT_EQ(payloads_error_status.size(), 3);
}
#if defined(PLATFORM_GOOGLE)
absl::Status GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::Status PropagateError() {
TF_RETURN_IF_ERROR(GetError());
return absl::OkStatus();
}
absl::Status PropagateError2() {
TF_RETURN_IF_ERROR(PropagateError());
return absl::OkStatus();
}
TEST(Status, StackTracePropagation) {
absl::Status s = PropagateError2();
auto sources = s.GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/errors_test.cc");
}
}
TEST(Status, SourceLocationsPreservedByAppend) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
errors::AppendToMessage(&s, "A new message.");
ASSERT_EQ(s.GetSourceLocations().size(), 3);
}
TEST(Status, SourceLocationsPreservedByUpdate) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
absl::Status s2 = errors::CreateWithUpdatedMessage(s, "New message.");
ASSERT_EQ(s2.GetSourceLocations().size(), 3);
}
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/errors.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/errors_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cd56c00-f971-453e-acfd-2b230713e4ec | cpp | tensorflow/tensorflow | status | third_party/xla/third_party/tsl/tsl/platform/status.cc | tensorflow/core/lib/core/status_test.cc | #include "tsl/platform/status.h"
#include <stdio.h>
#include <deque>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
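// Log sink that buffers the most recent warning/error log lines so they can
// later be attached to aggregated statuses. Only messages of warning severity
// or above are kept; the buffer size defaults to 5 and can be overridden via
// TF_WORKER_NUM_FORWARDED_LOG_MESSAGES.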
class StatusLogSink : public TFLogSink {
public:
static StatusLogSink* GetInstance() {
static StatusLogSink* sink = new StatusLogSink();
return sink;
}
void enable() {
absl::call_once(flag_, [this] {
num_messages_ = 5;
if (const char* num_msgs_str =
getenv("TF_WORKER_NUM_FORWARDED_LOG_MESSAGES")) {
if (!absl::SimpleAtoi(num_msgs_str, &num_messages_)) {
LOG(WARNING) << "Failed to parse env variable "
"TF_WORKER_NUM_WARNING_ERROR_LOG_IN_STATUS="
<< num_msgs_str << " as int. Using the default value "
<< num_messages_ << ".";
}
}
if (num_messages_ > 0) {
TFAddLogSink(this);
}
});
}
void GetMessages(std::vector<std::string>* logs) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
for (auto& msg : messages_) {
logs->push_back(msg);
}
}
void Send(const TFLogEntry& entry) override TF_LOCKS_EXCLUDED(mu_) {
if (entry.log_severity() < absl::LogSeverity::kWarning) return;
mutex_lock lock(mu_);
messages_.emplace_back(entry.ToString());
if (messages_.size() > static_cast<size_t>(num_messages_)) {
messages_.pop_front();
}
}
private:
mutex mu_;
absl::once_flag flag_;
int num_messages_ = 0;
std::deque<std::string> messages_ TF_GUARDED_BY(mu_);
};
}
namespace errors {
static constexpr const char kStackTraceProtoUrl[] =
"type.googleapis.com/tensorflow.StackTracePayload";
void SetStackTrace(absl::Status& status, std::vector<StackFrame> stack_trace) {
std::vector<std::string> items;
items.reserve(stack_trace.size());
for (StackFrame& frame : stack_trace) {
items.push_back(
absl::StrCat(absl::StrReplaceAll(frame.file_name, {{"\n", ""}}), "\n",
frame.line_number, "\n",
absl::StrReplaceAll(frame.function_name, {{"\n", ""}})));
}
status.SetPayload(kStackTraceProtoUrl,
absl::Cord(absl::StrJoin(items, "\n")));
}
std::vector<StackFrame> GetStackTrace(const absl::Status& status) {
std::vector<StackFrame> stack_trace;
absl::optional<absl::Cord> maybe_serialized_payload =
status.GetPayload(kStackTraceProtoUrl);
if (maybe_serialized_payload.has_value()) {
std::vector<std::string> split =
absl::StrSplit(maybe_serialized_payload.value().Flatten(), '\n');
assert(split.size() % 3 == 0);
for (int i = 0; i < split.size() / 3; ++i) {
const int idx = 3 * i;
int line_number = -1;
CHECK(absl::SimpleAtoi(split[idx + 1], &line_number));
stack_trace.emplace_back(std::move(split[idx]), line_number,
std::move(split[idx + 2]));
}
}
return stack_trace;
}
}
#ifdef _WIN32
const char* NullTerminatedMessage(const absl::Status& status) {
return absl::StatusMessageAsCStr(status);
}
#endif
std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) {
std::stringstream ss;
ss << "Non-OK-status: " << msg << "\nStatus: " << v;
return new std::string(ss.str());
}
StatusGroup::StatusGroup() {}
StatusGroup::StatusGroup(std::initializer_list<absl::Status> statuses) {
for (const absl::Status& s : statuses) {
Update(s);
}
}
static constexpr const char kDerivedStatusProtoUrl[] =
"type.googleapis.com/tensorflow.DerivedStatus";
absl::Status StatusGroup::MakeDerived(const absl::Status& s) {
if (IsDerived(s)) {
return s;
} else {
absl::Status derived(s);
derived.SetPayload(kDerivedStatusProtoUrl, absl::Cord(""));
return derived;
}
}
bool StatusGroup::IsDerived(const absl::Status& s) {
return s.GetPayload(kDerivedStatusProtoUrl).has_value();
}
void StatusGroup::ConfigureLogHistory() {
StatusLogSink::GetInstance()->enable();
}
void StatusGroup::Update(const absl::Status& s) {
if (s.ok()) {
++num_ok_;
} else {
ok_ = false;
if (IsDerived(s)) {
derived_.insert(s);
} else {
non_derived_.insert(s);
}
}
}
static constexpr int kMaxAggregatedStatusMessageSize = 8 * 1024;
static constexpr int kMaxAttachedLogMessageSize = 512;
std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
std::unordered_map<std::string, absl::Cord> payloads;
auto capture_payload = [&payloads](absl::string_view key,
const absl::Cord& value) {
payloads[std::string(key)] = value;
};
for (const auto& status : derived_) {
status.ForEachPayload(capture_payload);
}
for (const auto& status : non_derived_) {
status.ForEachPayload(capture_payload);
}
payloads.erase(kDerivedStatusProtoUrl);
return payloads;
}
absl::Status MakeStatus(
absl::StatusCode code, absl::string_view message,
const std::unordered_map<std::string, absl::Cord>& payloads) {
absl::Status status(code, message);
for (const auto& payload : payloads) {
status.SetPayload(payload.first, payload.second);
}
return status;
}
std::string MakeString(const absl::Status& status) {
return absl::StrCat(absl::StatusCodeToString(status.code()), ": ",
status.message());
}
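// Summarizes the group as a single status: a lone root error is returned
// as-is (plus any captured logs); multiple root errors are folded into one
// message listing each of them, capped at kMaxAggregatedStatusMessageSize;
// if only derived errors exist, one of them is propagated as a derived status.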
absl::Status StatusGroup::as_summary_status() const {
if (ok_) {
return absl::OkStatus();
}
auto get_recent_logs = [this]() -> std::string {
if (!recent_logs_.empty()) {
std::vector<std::string> fmt;
fmt.push_back("\nRecent warning and error logs:");
for (auto& log : recent_logs_) {
fmt.push_back(" " + log.substr(0, kMaxAttachedLogMessageSize));
}
return absl::StrJoin(fmt, "\n");
} else {
return "";
}
};
if (non_derived_.size() == 1) {
return MakeStatus(
non_derived_.begin()->code(),
strings::StrCat(non_derived_.begin()->message(), get_recent_logs()),
GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<std::string> fmt;
fmt.push_back(
strings::Printf("%zu root error(s) found.", non_derived_.size()));
int index = 0;
auto code = absl::StatusCode::kCancelled;
for (const auto& s : non_derived_) {
if (code == absl::StatusCode::kCancelled &&
s.code() != absl::StatusCode::kCancelled) {
code = s.code();
}
fmt.emplace_back(strings::StrCat(" (", index, ") ", MakeString(s)));
++index;
}
fmt.push_back(strings::Printf("%zu successful operations.", num_ok_));
fmt.push_back(
strings::Printf("%zu derived errors ignored.", derived_.size()));
std::string error_msg =
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize);
return MakeStatus(code, strings::StrCat(error_msg, get_recent_logs()),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
absl::Status StatusGroup::as_concatenated_status() const {
if (ok_) {
return absl::OkStatus();
}
if (non_derived_.size() == 1) {
return MakeStatus(non_derived_.begin()->code(),
non_derived_.begin()->message(), GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<string> fmt;
fmt.emplace_back("\n=====================");
for (const auto& s : non_derived_) {
fmt.emplace_back(MakeString(s));
}
fmt.emplace_back("=====================\n");
return MakeStatus(
non_derived_.begin()->code(),
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
void StatusGroup::AttachLogMessages() {
recent_logs_.clear();
StatusLogSink::GetInstance()->GetMessages(&recent_logs_);
}
} | #include "tensorflow/core/lib/core/status.h"
#include "absl/strings/match.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
TEST(Status, OK) {
EXPECT_EQ(absl::OkStatus().code(), error::OK);
EXPECT_EQ(absl::OkStatus().message(), "");
TF_EXPECT_OK(absl::OkStatus());
TF_ASSERT_OK(absl::OkStatus());
EXPECT_EQ(absl::OkStatus(), Status());
Status s;
EXPECT_TRUE(s.ok());
}
TEST(DeathStatus, CheckOK) {
Status status(errors::InvalidArgument("Invalid"));
ASSERT_DEATH(TF_CHECK_OK(status), "Invalid");
}
TEST(Status, Set) {
Status status;
status = Status(absl::StatusCode::kCancelled, "Error message");
EXPECT_EQ(status.code(), absl::StatusCode::kCancelled);
EXPECT_EQ(status.message(), "Error message");
}
TEST(Status, Copy) {
Status a(errors::InvalidArgument("Invalid"));
Status b(a);
ASSERT_EQ(a.ToString(), b.ToString());
}
TEST(Status, Assign) {
Status a(errors::InvalidArgument("Invalid"));
Status b;
b = a;
ASSERT_EQ(a.ToString(), b.ToString());
}
TEST(Status, Move) {
Status a(errors::InvalidArgument("Invalid"));
Status b(std::move(a));
ASSERT_EQ("INVALID_ARGUMENT: Invalid", b.ToString());
}
TEST(Status, MoveAssign) {
Status a(errors::InvalidArgument("Invalid"));
Status b;
b = std::move(a);
ASSERT_EQ("INVALID_ARGUMENT: Invalid", b.ToString());
}
TEST(Status, Update) {
Status s;
s.Update(absl::OkStatus());
ASSERT_TRUE(s.ok());
Status a(errors::InvalidArgument("Invalid"));
s.Update(a);
ASSERT_EQ(s.ToString(), a.ToString());
Status b(errors::Internal("Internal"));
s.Update(b);
ASSERT_EQ(s.ToString(), a.ToString());
s.Update(absl::OkStatus());
ASSERT_EQ(s.ToString(), a.ToString());
ASSERT_FALSE(s.ok());
}
TEST(Status, EqualsOK) { ASSERT_EQ(absl::OkStatus(), Status()); }
TEST(Status, EqualsSame) {
Status a(errors::InvalidArgument("Invalid"));
Status b(errors::InvalidArgument("Invalid"));
ASSERT_EQ(a, b);
}
TEST(Status, EqualsCopy) {
const Status a(errors::InvalidArgument("Invalid"));
const Status b = a;
ASSERT_EQ(a, b);
}
TEST(Status, EqualsDifferentCode) {
const Status a(errors::InvalidArgument("message"));
const Status b(errors::Internal("message"));
ASSERT_NE(a, b);
}
TEST(Status, EqualsDifferentMessage) {
const Status a(errors::InvalidArgument("message"));
const Status b(errors::InvalidArgument("another"));
ASSERT_NE(a, b);
}
TEST(StatusGroup, OKStatusGroup) {
StatusGroup c;
c.Update(absl::OkStatus());
c.Update(absl::OkStatus());
ASSERT_EQ(c.as_summary_status(), absl::OkStatus());
ASSERT_EQ(c.as_concatenated_status(), absl::OkStatus());
}
TEST(StatusGroup, AggregateWithSingleErrorStatus) {
StatusGroup c;
const Status internal(errors::Internal("Original error."));
c.Update(internal);
ASSERT_EQ(c.as_summary_status(), internal);
Status concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
const Status derived =
StatusGroup::MakeDerived(errors::Internal("Derived error."));
c.Update(derived);
ASSERT_EQ(c.as_summary_status(), internal);
concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
}
TEST(StatusGroup, AggregateWithMultipleErrorStatus) {
StatusGroup c;
const Status internal(errors::Internal("Original error."));
const Status cancelled(errors::Cancelled("Cancelled after 10 steps."));
const Status aborted(errors::Aborted("Aborted after 10 steps."));
c.Update(internal);
c.Update(cancelled);
c.Update(aborted);
Status summary = c.as_summary_status();
ASSERT_EQ(summary.code(), internal.code());
ASSERT_TRUE(absl::StrContains(summary.message(), internal.message()));
ASSERT_TRUE(absl::StrContains(summary.message(), cancelled.message()));
ASSERT_TRUE(absl::StrContains(summary.message(), aborted.message()));
Status concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
ASSERT_TRUE(absl::StrContains(concat_status.message(), cancelled.message()));
ASSERT_TRUE(absl::StrContains(concat_status.message(), aborted.message()));
}
TEST(Status, InvalidPayloadGetsIgnored) {
Status s = Status();
s.SetPayload("Invalid", absl::Cord("Invalid Val"));
ASSERT_FALSE(s.GetPayload("Invalid").has_value());
bool is_err_erased = s.ErasePayload("Invalid");
ASSERT_EQ(is_err_erased, false);
}
TEST(Status, SetPayloadSetsOrUpdatesIt) {
Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("Error key", absl::Cord("Original"));
ASSERT_EQ(s.GetPayload("Error key"), absl::Cord("Original"));
s.SetPayload("Error key", absl::Cord("Updated"));
ASSERT_EQ(s.GetPayload("Error key"), absl::Cord("Updated"));
}
TEST(Status, ErasePayloadRemovesIt) {
Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("Error key", absl::Cord("Original"));
bool is_err_erased = s.ErasePayload("Error key");
ASSERT_EQ(is_err_erased, true);
is_err_erased = s.ErasePayload("Error key");
ASSERT_EQ(is_err_erased, false);
ASSERT_FALSE(s.GetPayload("Error key").has_value());
}
static void BM_TF_CHECK_OK(::testing::benchmark::State& state) {
tensorflow::Status s = (state.max_iterations < 0)
? errors::InvalidArgument("Invalid")
: absl::OkStatus();
for (auto i : state) {
TF_CHECK_OK(s);
}
}
BENCHMARK(BM_TF_CHECK_OK);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e3b1365-8dff-460a-9cc1-436efbb0f2a9 | cpp | tensorflow/tensorflow | path | third_party/xla/third_party/tsl/tsl/platform/path.cc | third_party/xla/third_party/tsl/tsl/platform/path_test.cc | #include "tsl/platform/path.h"
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(PLATFORM_WINDOWS)
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace io {
namespace internal {
namespace {
const char kPathSep[] = "/";
}
string JoinPathImpl(std::initializer_list<absl::string_view> paths) {
string result;
for (absl::string_view path : paths) {
if (path.empty()) continue;
if (result.empty()) {
result = string(path);
continue;
}
if (IsAbsolutePath(path)) path = path.substr(1);
if (result[result.size() - 1] == kPathSep[0]) {
strings::StrAppend(&result, path);
} else {
strings::StrAppend(&result, kPathSep, path);
}
}
return result;
}
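// Splits a (possibly URI-style) path around the final '/', returning
// (dirname, basename). The scheme and host, if any, stay with the directory
// part, and the separator itself is kept only when the directory part would
// otherwise be empty (a root path).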
std::pair<absl::string_view, absl::string_view> SplitPath(
absl::string_view uri) {
absl::string_view scheme, host, path;
ParseURI(uri, &scheme, &host, &path);
auto pos = path.rfind('/');
#ifdef PLATFORM_WINDOWS
if (pos == StringPiece::npos) pos = path.rfind('\\');
#endif
if (pos == absl::string_view::npos)
return std::make_pair(
absl::string_view(uri.data(), host.end() - uri.begin()), path);
if (pos == 0)
return std::make_pair(
absl::string_view(uri.data(), path.begin() + 1 - uri.begin()),
absl::string_view(path.data() + 1, path.size() - 1));
return std::make_pair(
absl::string_view(uri.data(), path.begin() + pos - uri.begin()),
absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
std::pair<absl::string_view, absl::string_view> SplitBasename(
absl::string_view path) {
path = Basename(path);
auto pos = path.rfind('.');
if (pos == absl::string_view::npos)
return std::make_pair(path,
absl::string_view(path.data() + path.size(), 0));
return std::make_pair(
absl::string_view(path.data(), pos),
absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
}
bool IsAbsolutePath(absl::string_view path) {
return !path.empty() && path[0] == '/';
}
absl::string_view Dirname(absl::string_view path) {
return internal::SplitPath(path).first;
}
absl::string_view Basename(absl::string_view path) {
return internal::SplitPath(path).second;
}
absl::string_view Extension(absl::string_view path) {
return internal::SplitBasename(path).second;
}
absl::string_view BasenamePrefix(absl::string_view path) {
return internal::SplitBasename(path).first;
}
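// Collapses repeated '/' separators and resolves "." and ".." segments
// lexically (without touching the filesystem); an input that reduces to
// nothing becomes ".".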
string CleanPath(absl::string_view unclean_path) {
string path(unclean_path);
const char* src = path.c_str();
string::iterator dst = path.begin();
const bool is_absolute_path = *src == '/';
if (is_absolute_path) {
*dst++ = *src++;
while (*src == '/') ++src;
}
string::const_iterator backtrack_limit = dst;
while (*src) {
bool parsed = false;
if (src[0] == '.') {
if (src[1] == '/' || !src[1]) {
if (*++src) {
++src;
}
parsed = true;
} else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
src += 2;
if (dst != backtrack_limit) {
for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
}
} else if (!is_absolute_path) {
src -= 2;
*dst++ = *src++;
*dst++ = *src++;
if (*src) {
*dst++ = *src;
}
backtrack_limit = dst;
}
if (*src) {
++src;
}
parsed = true;
}
}
if (!parsed) {
while (*src && *src != '/') {
*dst++ = *src++;
}
if (*src) {
*dst++ = *src++;
}
}
while (*src == '/') {
++src;
}
}
string::difference_type path_length = dst - path.begin();
if (path_length != 0) {
if (path_length > 1 && path[path_length - 1] == '/') {
--path_length;
}
path.resize(path_length);
} else {
path.assign(1, '.');
}
return path;
}
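// Splits `uri` into scheme, host and path; inputs without a parseable scheme
// come back with empty scheme/host and the whole string as the path.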
void ParseURI(absl::string_view uri, absl::string_view* scheme,
absl::string_view* host, absl::string_view* path) {
if (!strings::Scanner(uri)
.One(strings::Scanner::LETTER)
.Many(strings::Scanner::LETTER_DIGIT_DOT)
.StopCapture()
.OneLiteral(":
.GetResult(&uri, scheme)) {
*scheme = absl::string_view(uri.data(), 0);
*host = absl::string_view(uri.data(), 0);
*path = uri;
return;
}
if (!strings::Scanner(uri).ScanUntil('/').GetResult(&uri, host)) {
*host = uri;
*path = absl::string_view();
return;
}
*path = uri;
}
string CreateURI(absl::string_view scheme, absl::string_view host,
absl::string_view path) {
if (scheme.empty()) {
return string(path);
}
  return strings::StrCat(scheme, "://", host, path);
}
int64_t UniqueId() {
static mutex mu(LINKER_INITIALIZED);
static int64_t id = 0;
mutex_lock l(mu);
return ++id;
}
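// Returns the longest common prefix of `paths`, truncated back to the last
// path separator so the result is always a directory-like prefix (possibly
// empty).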
string CommonPathPrefix(absl::Span<const string> paths) {
if (paths.empty()) return "";
size_t min_filename_size =
absl::c_min_element(paths, [](const string& a, const string& b) {
return a.size() < b.size();
})->size();
if (min_filename_size == 0) return "";
size_t common_prefix_size = [&] {
for (size_t prefix_size = 0; prefix_size < min_filename_size;
prefix_size++) {
char c = paths[0][prefix_size];
for (int f = 1; f < paths.size(); f++) {
if (paths[f][prefix_size] != c) {
return prefix_size;
}
}
}
return min_filename_size;
}();
size_t rpos = absl::string_view(paths[0])
.substr(0, common_prefix_size)
.rfind(internal::kPathSep);
return rpos == std::string::npos
? ""
: std::string(absl::string_view(paths[0]).substr(0, rpos + 1));
}
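// Creates a uniquely named temporary file (with the given extension, if any)
// under the first usable directory among TEST_TMPDIR, TMPDIR, TMP and /tmp on
// POSIX, or the system temp path on Windows; it LOG(FATAL)s if no usable
// location exists, and is not implemented on Android.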
string GetTempFilename(const string& extension) {
#if defined(__ANDROID__)
LOG(FATAL) << "GetTempFilename is not implemented in this platform.";
#elif defined(PLATFORM_WINDOWS)
char temp_dir[_MAX_PATH];
DWORD retval;
retval = GetTempPath(_MAX_PATH, temp_dir);
if (retval > _MAX_PATH || retval == 0) {
LOG(FATAL) << "Cannot get the directory for temporary files.";
}
char temp_file_name[_MAX_PATH];
retval = GetTempFileName(temp_dir, "", UniqueId(), temp_file_name);
if (retval > _MAX_PATH || retval == 0) {
LOG(FATAL) << "Cannot get a temporary file in: " << temp_dir;
}
string full_tmp_file_name(temp_file_name);
full_tmp_file_name.append(extension);
return full_tmp_file_name;
#else
for (const char* dir : std::vector<const char*>(
{getenv("TEST_TMPDIR"), getenv("TMPDIR"), getenv("TMP"), "/tmp"})) {
if (!dir || !dir[0]) {
continue;
}
struct stat statbuf;
if (!stat(dir, &statbuf) && S_ISDIR(statbuf.st_mode)) {
string tmp_filepath;
int fd;
if (extension.length()) {
tmp_filepath = io::JoinPath(
dir, strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX.",
extension));
fd = mkstemps(&tmp_filepath[0], extension.length() + 1);
} else {
tmp_filepath = io::JoinPath(
dir,
strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX"));
fd = mkstemp(&tmp_filepath[0]);
}
if (fd < 0) {
LOG(FATAL) << "Failed to create temp file.";
} else {
if (close(fd) < 0) {
LOG(ERROR) << "close() failed: " << strerror(errno);
}
return tmp_filepath;
}
}
}
LOG(FATAL) << "No temp directory found.";
std::abort();
#endif
}
namespace {
bool StartsWithSegment(absl::string_view path, absl::string_view segment) {
return absl::StartsWith(path, segment) &&
(path.size() == segment.size() ||
path.at(segment.size()) == internal::kPathSep[0]);
}
}
bool GetTestWorkspaceDir(string* dir) {
const char* srcdir = getenv("TEST_SRCDIR");
if (srcdir == nullptr) {
return false;
}
const char* workspace = getenv("TEST_WORKSPACE");
if (workspace == nullptr) {
return false;
}
if (dir != nullptr) {
*dir = tsl::io::JoinPath(srcdir, workspace);
}
return true;
}
bool GetTestUndeclaredOutputsDir(string* dir) {
const char* outputs_dir = getenv("TEST_UNDECLARED_OUTPUTS_DIR");
if (outputs_dir == nullptr) {
return false;
}
if (dir != nullptr) {
*dir = outputs_dir;
}
return true;
}
bool ResolveTestPrefixes(absl::string_view path, string& resolved_path) {
constexpr absl::string_view kTestWorkspaceSegment = "TEST_WORKSPACE";
constexpr absl::string_view kOutputDirSegment = "TEST_UNDECLARED_OUTPUTS_DIR";
if (StartsWithSegment(path, kTestWorkspaceSegment)) {
if (!GetTestWorkspaceDir(&resolved_path)) {
return false;
}
resolved_path += path.substr(kTestWorkspaceSegment.size());
return true;
} else if (StartsWithSegment(path, kOutputDirSegment)) {
if (!GetTestUndeclaredOutputsDir(&resolved_path)) {
return false;
}
resolved_path += path.substr(kOutputDirSegment.size());
return true;
} else {
resolved_path = path;
return true;
}
}
[[maybe_unused]] std::string& AppendDotExeIfWindows(std::string& path) {
#ifdef PLATFORM_WINDOWS
path.append(".exe");
#endif
return path;
}
}
} | #include "tsl/platform/path.h"
#include <string>
#include "tsl/platform/env.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
TEST(PathTest, JoinPath) {
EXPECT_EQ("/foo/bar", JoinPath("/foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "/bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo", "/bar"));
EXPECT_EQ("/bar", JoinPath("", "/bar"));
EXPECT_EQ("bar", JoinPath("", "bar"));
EXPECT_EQ("/foo", JoinPath("/foo", ""));
EXPECT_EQ("/foo/bar/baz/blah/blink/biz",
JoinPath("/foo/bar/baz/", "/blah/blink/biz"));
EXPECT_EQ("/foo/bar/baz/blah", JoinPath("/foo", "bar", "baz", "blah"));
}
TEST(PathTest, IsAbsolutePath) {
EXPECT_FALSE(IsAbsolutePath(""));
EXPECT_FALSE(IsAbsolutePath("../foo"));
EXPECT_FALSE(IsAbsolutePath("foo"));
EXPECT_FALSE(IsAbsolutePath("./foo"));
EXPECT_FALSE(IsAbsolutePath("foo/bar/baz/"));
EXPECT_TRUE(IsAbsolutePath("/foo"));
EXPECT_TRUE(IsAbsolutePath("/foo/bar/../baz"));
}
TEST(PathTest, Dirname) {
EXPECT_EQ("hdfs:
Dirname("hdfs:
EXPECT_EQ("/hello", Dirname("/hello/"));
EXPECT_EQ("/", Dirname("/hello"));
EXPECT_EQ("hello", Dirname("hello/world"));
EXPECT_EQ("hello", Dirname("hello/"));
EXPECT_EQ("", Dirname("world"));
EXPECT_EQ("/", Dirname("/"));
EXPECT_EQ("", Dirname(""));
}
TEST(PathTest, Basename) {
EXPECT_EQ("", Basename("/hello/"));
EXPECT_EQ("hello", Basename("/hello"));
EXPECT_EQ("world", Basename("hello/world"));
EXPECT_EQ("", Basename("hello/"));
EXPECT_EQ("world", Basename("world"));
EXPECT_EQ("", Basename("/"));
EXPECT_EQ("", Basename(""));
}
TEST(PathTest, Extension) {
EXPECT_EQ("gif", Extension("foo.gif"));
EXPECT_EQ("", Extension("foo."));
EXPECT_EQ("", Extension(""));
EXPECT_EQ("", Extension("/"));
EXPECT_EQ("", Extension("foo"));
EXPECT_EQ("", Extension("foo/"));
EXPECT_EQ("gif", Extension("/a/path/to/foo.gif"));
EXPECT_EQ("html", Extension("/a/path.bar/to/foo.html"));
EXPECT_EQ("", Extension("/a/path.bar/to/foo"));
EXPECT_EQ("baz", Extension("/a/path.bar/to/foo.bar.baz"));
}
TEST(PathTest, CleanPath) {
EXPECT_EQ(".", CleanPath(""));
EXPECT_EQ("x", CleanPath("x"));
EXPECT_EQ("/a/b/c/d", CleanPath("/a/b/c/d"));
EXPECT_EQ("/a/b/c/dtrue);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_TRUE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, "/repo/src/my/workspace");
EXPECT_TRUE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
}
TEST(PathTest, GetTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string dir;
dir = kOriginalValue;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
EXPECT_TRUE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, "/test/outputs");
EXPECT_TRUE(GetTestUndeclaredOutputsDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
EXPECT_FALSE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestUndeclaredOutputsDir(nullptr));
}
TEST(PathTest, ResolveTestPrefixesKeepsThePathUnchanged) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("", resolved_path));
EXPECT_EQ(resolved_path, "");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/", resolved_path));
EXPECT_EQ(resolved_path, "/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "alpha/beta");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "/alpha/beta");
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACEE", resolved_path));
EXPECT_EQ(resolved_path, "TEST_WORKSPACEE");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_WORKSPACE");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
resolved_path = kOriginalValue;
EXPECT_FALSE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIRR", resolved_path));
EXPECT_EQ(resolved_path, "TEST_UNDECLARED_OUTPUTS_DIRR");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("/TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_UNDECLARED_OUTPUTS_DIR");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
resolved_path = kOriginalValue;
EXPECT_FALSE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/path.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/path_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
986d4332-9393-4e1e-9d83-25ea2e3f208e | cpp | tensorflow/tensorflow | cpu_info | third_party/xla/third_party/tsl/tsl/platform/cpu_info.cc | third_party/xla/third_party/tsl/tsl/platform/cpu_info_test.cc | #include "tsl/platform/cpu_info.h"
#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/types.h"
#if defined(PLATFORM_IS_X86)
#include <mutex>
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
#include <sys/auxv.h>
#ifndef HWCAP_CPUID
#define HWCAP_CPUID (1 << 11)
#endif
#include <fstream>
#endif
#ifdef PLATFORM_IS_X86
#ifdef PLATFORM_WINDOWS
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
{ \
int cpu_info[4] = {-1}; \
__cpuidex(cpu_info, a_inp, c_inp); \
a = cpu_info[0]; \
b = cpu_info[1]; \
c = cpu_info[2]; \
d = cpu_info[3]; \
}
#else
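// The inline-asm variant parks %rbx in %rdi around CPUID and reads the EBX
// result back through %rdi ("=D"), because %rbx may be reserved (e.g. as the
// PIC base register) and must not be clobbered directly.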
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
: "a"(a_inp), "2"(c_inp))
#endif
#endif
namespace tsl {
namespace port {
namespace {
#ifdef PLATFORM_IS_X86
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
#ifdef PLATFORM_WINDOWS
int GetXCR0EAX() { return _xgetbv(0); }
#else
int GetXCR0EAX() {
int eax, edx;
asm("XGETBV" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
}
#endif
class CPUIDInfo {
public:
CPUIDInfo()
: have_adx_(0),
have_aes_(0),
have_amx_bf16_(0),
have_amx_fp16_(0),
have_amx_int8_(0),
have_amx_tile_(0),
have_avx_(0),
have_avx2_(0),
have_avx512f_(0),
have_avx512cd_(0),
have_avx512er_(0),
have_avx512pf_(0),
have_avx512vl_(0),
have_avx512bw_(0),
have_avx512dq_(0),
have_avx512vbmi_(0),
have_avx512ifma_(0),
have_avx512_4vnniw_(0),
have_avx512_4fmaps_(0),
have_avx512_bf16_(0),
have_avx512_fp16_(0),
have_avx512_vnni_(0),
have_avx_vnni_(0),
have_avx_vnni_int8_(0),
have_avx_ne_convert_(0),
have_bmi1_(0),
have_bmi2_(0),
have_cmov_(0),
have_cmpxchg16b_(0),
have_cmpxchg8b_(0),
have_f16c_(0),
have_fma_(0),
have_mmx_(0),
have_pclmulqdq_(0),
have_popcnt_(0),
have_prefetchw_(0),
have_prefetchwt1_(0),
have_rdrand_(0),
have_rdseed_(0),
have_smap_(0),
have_sse_(0),
have_sse2_(0),
have_sse3_(0),
have_sse4_1_(0),
have_sse4_2_(0),
have_ssse3_(0),
have_hypervisor_(0) {}
static void Initialize() {
CHECK(cpuid == nullptr) << __func__ << " ran more than once";
cpuid = new CPUIDInfo;
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ebx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&edx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ecx), 4);
GETCPUID(eax, ebx, ecx, edx, 1, 0);
cpuid->model_num_ = static_cast<int>((eax >> 4) & 0xf);
cpuid->family_ = static_cast<int>((eax >> 8) & 0xf);
cpuid->have_aes_ = (ecx >> 25) & 0x1;
cpuid->have_cmov_ = (edx >> 15) & 0x1;
cpuid->have_cmpxchg16b_ = (ecx >> 13) & 0x1;
cpuid->have_cmpxchg8b_ = (edx >> 8) & 0x1;
cpuid->have_mmx_ = (edx >> 23) & 0x1;
cpuid->have_pclmulqdq_ = (ecx >> 1) & 0x1;
cpuid->have_popcnt_ = (ecx >> 23) & 0x1;
cpuid->have_rdrand_ = (ecx >> 30) & 0x1;
cpuid->have_sse2_ = (edx >> 26) & 0x1;
cpuid->have_sse3_ = ecx & 0x1;
cpuid->have_sse4_1_ = (ecx >> 19) & 0x1;
cpuid->have_sse4_2_ = (ecx >> 20) & 0x1;
cpuid->have_sse_ = (edx >> 25) & 0x1;
cpuid->have_ssse3_ = (ecx >> 9) & 0x1;
cpuid->have_hypervisor_ = (ecx >> 31) & 1;
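// AVX and AVX-512 are only reported as available if the OS saves the wider
// register state: XGETBV (XCR0) is consulted in addition to the CPUID bits.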
const uint64 xcr0_xmm_mask = 0x2;
const uint64 xcr0_ymm_mask = 0x4;
const uint64 xcr0_maskreg_mask = 0x20;
const uint64 xcr0_zmm0_15_mask = 0x40;
const uint64 xcr0_zmm16_31_mask = 0x80;
const uint64 xcr0_avx_mask = xcr0_xmm_mask | xcr0_ymm_mask;
const uint64 xcr0_avx512_mask = xcr0_avx_mask | xcr0_maskreg_mask |
xcr0_zmm0_15_mask | xcr0_zmm16_31_mask;
const bool have_avx =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx_mask) == xcr0_avx_mask) &&
((ecx >> 28) & 0x1);
const bool have_avx512 =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx512_mask) == xcr0_avx512_mask);
cpuid->have_avx_ = have_avx;
cpuid->have_fma_ = have_avx && ((ecx >> 12) & 0x1);
cpuid->have_f16c_ = have_avx && ((ecx >> 29) & 0x1);
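// Structured extended feature flags: CPUID leaf 7, subleaf 0; EAX reports the
// highest supported subleaf.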
GETCPUID(eax, ebx, ecx, edx, 7, 0);
const uint32 kMaxNumSubLeaves = eax;
cpuid->have_adx_ = (ebx >> 19) & 0x1;
cpuid->have_avx2_ = have_avx && ((ebx >> 5) & 0x1);
cpuid->have_bmi1_ = (ebx >> 3) & 0x1;
cpuid->have_bmi2_ = (ebx >> 8) & 0x1;
cpuid->have_prefetchwt1_ = ecx & 0x1;
cpuid->have_rdseed_ = (ebx >> 18) & 0x1;
cpuid->have_smap_ = (ebx >> 20) & 0x1;
cpuid->have_avx512f_ = have_avx512 && ((ebx >> 16) & 0x1);
cpuid->have_avx512cd_ = have_avx512 && ((ebx >> 28) & 0x1);
cpuid->have_avx512er_ = have_avx512 && ((ebx >> 27) & 0x1);
cpuid->have_avx512pf_ = have_avx512 && ((ebx >> 26) & 0x1);
cpuid->have_avx512vl_ = have_avx512 && ((ebx >> 31) & 0x1);
cpuid->have_avx512bw_ = have_avx512 && ((ebx >> 30) & 0x1);
cpuid->have_avx512dq_ = have_avx512 && ((ebx >> 17) & 0x1);
cpuid->have_avx512vbmi_ = have_avx512 && ((ecx >> 1) & 0x1);
cpuid->have_avx512ifma_ = have_avx512 && ((ebx >> 21) & 0x1);
cpuid->have_avx512_4vnniw_ = have_avx512 && ((edx >> 2) & 0x1);
cpuid->have_avx512_4fmaps_ = have_avx512 && ((edx >> 3) & 0x1);
cpuid->have_avx512_vnni_ = have_avx512 && ((ecx >> 11) & 0x1);
cpuid->have_amx_tile_ = (edx >> 24) & 0x1;
cpuid->have_amx_int8_ = (edx >> 25) & 0x1;
cpuid->have_amx_bf16_ = (edx >> 22) & 0x1;
cpuid->have_avx512_fp16_ = have_avx512 && ((edx >> 23) & 0x1);
if (kMaxNumSubLeaves >= 1) {
GETCPUID(eax, ebx, ecx, edx, 7, 1);
cpuid->have_avx_vnni_ = (eax >> 4) & 0x1;
cpuid->have_avx512_bf16_ = have_avx512 && ((eax >> 5) & 0x1);
cpuid->have_amx_fp16_ = (eax >> 21) & 0x1;
cpuid->have_avx_vnni_int8_ = (edx >> 4) & 0x1;
cpuid->have_avx_ne_convert_ = (edx >> 5) & 0x1;
}
}
static bool TestFeature(CPUFeature feature) {
InitCPUIDInfo();
switch (feature) {
case ADX: return cpuid->have_adx_;
case AES: return cpuid->have_aes_;
case AMX_BF16: return cpuid->have_amx_bf16_;
case AMX_FP16: return cpuid->have_amx_fp16_;
case AMX_INT8: return cpuid->have_amx_int8_;
case AMX_TILE: return cpuid->have_amx_tile_;
case AVX2: return cpuid->have_avx2_;
case AVX: return cpuid->have_avx_;
case AVX512F: return cpuid->have_avx512f_;
case AVX512CD: return cpuid->have_avx512cd_;
case AVX512PF: return cpuid->have_avx512pf_;
case AVX512ER: return cpuid->have_avx512er_;
case AVX512VL: return cpuid->have_avx512vl_;
case AVX512BW: return cpuid->have_avx512bw_;
case AVX512DQ: return cpuid->have_avx512dq_;
case AVX512VBMI: return cpuid->have_avx512vbmi_;
case AVX512IFMA: return cpuid->have_avx512ifma_;
case AVX512_4VNNIW: return cpuid->have_avx512_4vnniw_;
case AVX512_4FMAPS: return cpuid->have_avx512_4fmaps_;
case AVX512_BF16: return cpuid->have_avx512_bf16_;
case AVX512_FP16: return cpuid->have_avx512_fp16_;
case AVX512_VNNI: return cpuid->have_avx512_vnni_;
case AVX_VNNI: return cpuid->have_avx_vnni_;
case AVX_VNNI_INT8: return cpuid->have_avx_vnni_int8_;
case AVX_NE_CONVERT: return cpuid->have_avx_ne_convert_;
case BMI1: return cpuid->have_bmi1_;
case BMI2: return cpuid->have_bmi2_;
case CMOV: return cpuid->have_cmov_;
case CMPXCHG16B: return cpuid->have_cmpxchg16b_;
case CMPXCHG8B: return cpuid->have_cmpxchg8b_;
case F16C: return cpuid->have_f16c_;
case FMA: return cpuid->have_fma_;
case MMX: return cpuid->have_mmx_;
case PCLMULQDQ: return cpuid->have_pclmulqdq_;
case POPCNT: return cpuid->have_popcnt_;
case PREFETCHW: return cpuid->have_prefetchw_;
case PREFETCHWT1: return cpuid->have_prefetchwt1_;
case RDRAND: return cpuid->have_rdrand_;
case RDSEED: return cpuid->have_rdseed_;
case SMAP: return cpuid->have_smap_;
case SSE2: return cpuid->have_sse2_;
case SSE3: return cpuid->have_sse3_;
case SSE4_1: return cpuid->have_sse4_1_;
case SSE4_2: return cpuid->have_sse4_2_;
case SSE: return cpuid->have_sse_;
case SSSE3: return cpuid->have_ssse3_;
case HYPERVISOR: return cpuid->have_hypervisor_;
default:
break;
}
return false;
}
string vendor_str() const { return vendor_str_; }
int family() const { return family_; }
int model_num() { return model_num_; }
private:
int have_adx_ : 1;
int have_aes_ : 1;
int have_amx_bf16_ : 1;
int have_amx_fp16_ : 1;
int have_amx_int8_ : 1;
int have_amx_tile_ : 1;
int have_avx_ : 1;
int have_avx2_ : 1;
int have_avx512f_ : 1;
int have_avx512cd_ : 1;
int have_avx512er_ : 1;
int have_avx512pf_ : 1;
int have_avx512vl_ : 1;
int have_avx512bw_ : 1;
int have_avx512dq_ : 1;
int have_avx512vbmi_ : 1;
int have_avx512ifma_ : 1;
int have_avx512_4vnniw_ : 1;
int have_avx512_4fmaps_ : 1;
int have_avx512_bf16_ : 1;
int have_avx512_fp16_ : 1;
int have_avx512_vnni_ : 1;
int have_avx_vnni_ : 1;
int have_avx_vnni_int8_ : 1;
int have_avx_ne_convert_ : 1;
int have_bmi1_ : 1;
int have_bmi2_ : 1;
int have_cmov_ : 1;
int have_cmpxchg16b_ : 1;
int have_cmpxchg8b_ : 1;
int have_f16c_ : 1;
int have_fma_ : 1;
int have_mmx_ : 1;
int have_pclmulqdq_ : 1;
int have_popcnt_ : 1;
int have_prefetchw_ : 1;
int have_prefetchwt1_ : 1;
int have_rdrand_ : 1;
int have_rdseed_ : 1;
int have_smap_ : 1;
int have_sse_ : 1;
int have_sse2_ : 1;
int have_sse3_ : 1;
int have_sse4_1_ : 1;
int have_sse4_2_ : 1;
int have_ssse3_ : 1;
int have_hypervisor_ : 1;
string vendor_str_;
int family_;
int model_num_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
class CPUIDInfo {
public:
CPUIDInfo()
: implementer_(0),
variant_(0),
cpunum_(0),
is_arm_neoverse_v1_(0),
is_arm_neoverse_n1_(0) {}
static void Initialize() {
if (cpuid != nullptr) return;
cpuid = new CPUIDInfo;
if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
return;
}
int present_cpu = -1;
#ifndef PLATFORM_WINDOWS
std::ifstream CPUspresent;
CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
if (CPUspresent.is_open()) {
std::string line;
if (static_cast<bool>(getline(CPUspresent, line))) {
auto ending = line.end();
for (auto i = line.begin(); i < line.end(); ++i) {
if (*i == '-' || *i == ',') {
ending = i;
break;
}
}
line.erase(ending, line.end());
present_cpu = std::stoi(line);
}
}
#endif
if (present_cpu == -1) {
return;
}
#ifndef PLATFORM_WINDOWS
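// MIDR_EL1 encodes the implementer in bits [31:24], the variant in [23:20]
// and the part number in [15:4]; implementer 0x41 is Arm Ltd., and the part
// numbers checked below are 0xd40 (Neoverse V1) and 0xd0c (Neoverse N1).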
std::stringstream str;
str << "/sys/devices/system/cpu/cpu" << present_cpu
<< "/regs/identification/midr_el1";
std::ifstream midr_el1_file(str.str(), std::ios::in);
if (midr_el1_file.is_open()) {
std::string line;
if (static_cast<bool>(getline(midr_el1_file, line))) {
uint32 midr_el1 = std::stoul(line, nullptr, 16);
cpuid->implementer_ = (midr_el1 >> 24) & 0xFF;
cpuid->variant_ = (midr_el1 >> 20) & 0xF;
cpuid->cpunum_ = (midr_el1 >> 4) & 0xFFF;
if (cpuid->implementer_ == 0x41) {
switch (cpuid->cpunum_) {
case 0xd40:
cpuid->is_arm_neoverse_v1_ = 1;
break;
case 0xd0c:
cpuid->is_arm_neoverse_n1_ = 1;
break;
default:
break;
}
}
}
}
#endif
}
int implementer() const { return implementer_; }
int cpunum() const { return cpunum_; }
static bool TestAarch64CPU(Aarch64CPU cpu) {
InitCPUIDInfo();
switch (cpu) {
case ARM_NEOVERSE_V1:
return cpuid->is_arm_neoverse_v1_;
default:
return 0;
}
}
private:
int implementer_;
int variant_;
int cpunum_;
int is_arm_neoverse_v1_;
int is_arm_neoverse_n1_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
}
bool TestCPUFeature(CPUFeature feature) {
#ifdef PLATFORM_IS_X86
return CPUIDInfo::TestFeature(feature);
#else
return false;
#endif
}
bool TestAarch64CPU(Aarch64CPU cpu) {
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
return CPUIDInfo::TestAarch64CPU(cpu);
#else
return false;
#endif
}
std::string CPUVendorIDString() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->vendor_str();
#else
return "";
#endif
}
int CPUFamily() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->family();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->implementer();
#else
return 0;
#endif
}
int CPUModelNum() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->model_num();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->cpunum();
#else
return 0;
#endif
}
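// Queries CPUID leaf 11 (extended topology): when the first level is of type
// SMT (1), the function returns 1 << EAX[4:0], the number of logical-processor
// IDs reserved per core at that level.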
int CPUIDNumSMT() {
#ifdef PLATFORM_IS_X86
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
if (eax >= 11) {
GETCPUID(eax, ebx, ecx, edx, 11, 0);
if (ebx != 0 && ((ecx & 0xff00) >> 8) == 1) {
return 1 << (eax & 0x1f);
}
}
#endif
return 0;
}
}
} | #include "tsl/platform/cpu_info.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(CPUInfo, CommonX86CPU) {
if (port::TestCPUFeature(port::CPUFeature::SSE)) {
EXPECT_TRUE(port::IsX86CPU());
}
}
TEST(CPUInfo, Aarch64NeoverseV1CPU) {
if (port::TestAarch64CPU(port::Aarch64CPU::ARM_NEOVERSE_V1)) {
EXPECT_TRUE(port::IsAarch64CPU());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cpu_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cpu_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
172d4993-1731-40c0-8561-33d46cf51e04 | cpp | tensorflow/tensorflow | hash | third_party/xla/third_party/tsl/tsl/platform/hash.cc | third_party/xla/third_party/tsl/tsl/platform/hash_test.cc | #include "tsl/platform/hash.h"
#include <string.h>
#include "tsl/platform/macros.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/types.h"
namespace tsl {
static inline uint32 ByteAs32(char c) { return static_cast<uint32>(c) & 0xff; }
static inline uint64 ByteAs64(char c) { return static_cast<uint64>(c) & 0xff; }
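// Hash32 is a MurmurHash2-style hash: 4-byte little-endian chunks are mixed
// with the multiplier 0x5bd1e995 (r = 24), the 0-3 trailing bytes are folded
// in, and a final avalanche is applied to h.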
uint32 Hash32(const char* data, size_t n, uint32 seed) {
const uint32 m = 0x5bd1e995;
const int r = 24;
uint32 h = seed ^ n;
while (n >= 4) {
uint32 k = core::DecodeFixed32(data);
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
n -= 4;
}
switch (n) {
case 3:
h ^= ByteAs32(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs32(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs32(data[0]);
h *= m;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
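// Hash64 follows the MurmurHash64A scheme: h starts as seed ^ (n * m) with
// m = 0xc6a4a7935bd1e995, 8-byte chunks and then the 0-7 trailing bytes are
// mixed in, and the result is finalized with xor-shift and multiply steps.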
uint64 Hash64(const char* data, size_t n, uint64 seed) {
const uint64 m = 0xc6a4a7935bd1e995;
const int r = 47;
uint64 h = seed ^ (n * m);
while (n >= 8) {
uint64 k = core::DecodeFixed64(data);
data += 8;
n -= 8;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
switch (n) {
case 7:
h ^= ByteAs64(data[6]) << 48;
TF_FALLTHROUGH_INTENDED;
case 6:
h ^= ByteAs64(data[5]) << 40;
TF_FALLTHROUGH_INTENDED;
case 5:
h ^= ByteAs64(data[4]) << 32;
TF_FALLTHROUGH_INTENDED;
case 4:
h ^= ByteAs64(data[3]) << 24;
TF_FALLTHROUGH_INTENDED;
case 3:
h ^= ByteAs64(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs64(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs64(data[0]);
h *= m;
}
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
} | #include <map>
#include <unordered_map>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
TEST(Hash, SignedUnsignedIssue) {
const unsigned char d1[1] = {0x62};
const unsigned char d2[2] = {0xc3, 0x97};
const unsigned char d3[3] = {0xe2, 0x99, 0xa5};
const unsigned char d4[4] = {0xe1, 0x80, 0xb9, 0x32};
const unsigned char d5[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
struct Case {
uint32 hash32;
uint64 hash64;
const unsigned char* data;
size_t size;
uint32 seed;
};
for (Case c : std::vector<Case>{
{0x471a8188u, 0x4c61ea3eeda4cb87ull, nullptr, 0, 0xbc9f1d34},
{0xd615eba5u, 0x091309f7ef916c8aull, d1, sizeof(d1), 0xbc9f1d34},
{0x0c3cccdau, 0xa815bcdf1d1af01cull, d2, sizeof(d2), 0xbc9f1d34},
{0x3ba37e0eu, 0x02167564e4d06430ull, d3, sizeof(d3), 0xbc9f1d34},
{0x16174eb3u, 0x8f7ed82ffc21071full, d4, sizeof(d4), 0xbc9f1d34},
{0x98b1926cu, 0xce196580c97aff1eull, d5, sizeof(d5), 0x12345678},
}) {
EXPECT_EQ(c.hash32,
Hash32(reinterpret_cast<const char*>(c.data), c.size, c.seed));
EXPECT_EQ(c.hash64,
Hash64(reinterpret_cast<const char*>(c.data), c.size, c.seed));
for (int align = 1; align <= 7; align++) {
std::string input(align, 'x');
input.append(reinterpret_cast<const char*>(c.data), c.size);
EXPECT_EQ(c.hash32, Hash32(&input[align], c.size, c.seed));
EXPECT_EQ(c.hash64, Hash64(&input[align], c.size, c.seed));
}
}
}
TEST(Hash, HashPtrIsNotIdentityFunction) {
int* ptr = reinterpret_cast<int*>(0xcafe0000);
EXPECT_NE(hash<int*>()(ptr), size_t{0xcafe0000});
}
static void BM_Hash32(::testing::benchmark::State& state) {
int len = state.range(0);
std::string input(len, 'x');
uint32 h = 0;
for (auto s : state) {
h = Hash32(input.data(), len, 1);
}
state.SetBytesProcessed(state.iterations() * len);
VLOG(1) << h;
}
BENCHMARK(BM_Hash32)->Range(1, 1024);
TEST(StringPieceHasher, Equality) {
StringPieceHasher hasher;
absl::string_view s1("foo");
absl::string_view s2("bar");
absl::string_view s3("baz");
absl::string_view s4("zot");
EXPECT_TRUE(hasher(s1) != hasher(s2));
EXPECT_TRUE(hasher(s1) != hasher(s3));
EXPECT_TRUE(hasher(s1) != hasher(s4));
EXPECT_TRUE(hasher(s2) != hasher(s3));
EXPECT_TRUE(hasher(s2) != hasher(s4));
EXPECT_TRUE(hasher(s3) != hasher(s4));
EXPECT_TRUE(hasher(s1) == hasher(s1));
EXPECT_TRUE(hasher(s2) == hasher(s2));
EXPECT_TRUE(hasher(s3) == hasher(s3));
EXPECT_TRUE(hasher(s4) == hasher(s4));
}
TEST(StringPieceHasher, HashMap) {
string s1("foo");
string s2("bar");
string s3("baz");
absl::string_view p1(s1);
absl::string_view p2(s2);
absl::string_view p3(s3);
std::unordered_map<absl::string_view, int, StringPieceHasher> map;
map.insert(std::make_pair(p1, 0));
map.insert(std::make_pair(p2, 1));
map.insert(std::make_pair(p3, 2));
EXPECT_EQ(map.size(), 3);
bool found[3] = {false, false, false};
for (auto const& val : map) {
int x = val.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], true);
EXPECT_EQ(found[2], true);
auto new_iter = map.find("zot");
EXPECT_TRUE(new_iter == map.end());
new_iter = map.find("bar");
EXPECT_TRUE(new_iter != map.end());
map.erase(new_iter);
EXPECT_EQ(map.size(), 2);
found[0] = false;
found[1] = false;
found[2] = false;
for (const auto& iter : map) {
int x = iter.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], false);
EXPECT_EQ(found[2], true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/hash.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/hash_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8e0291d-db3d-484b-b823-43c245721dd8 | cpp | tensorflow/tensorflow | resource_loader | third_party/xla/third_party/tsl/tsl/platform/resource_loader.cc | tensorflow/core/platform/resource_loader_test.cc | #include "tsl/platform/resource_loader.h"
#include <cstdlib>
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
namespace tsl {
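// Resolves a test data dependency relative to the Bazel runfiles tree using
// the TEST_SRCDIR and TEST_WORKSPACE environment variables; either being
// unset is fatal.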
std::string GetDataDependencyFilepath(const std::string& relative_path) {
const char* srcdir = std::getenv("TEST_SRCDIR");
if (!srcdir) {
LOG(FATAL) << "Environment variable TEST_SRCDIR unset!";
}
const char* workspace = std::getenv("TEST_WORKSPACE");
if (!workspace) {
LOG(FATAL) << "Environment variable TEST_WORKSPACE unset!";
}
return kIsOpenSource
? io::JoinPath(srcdir, workspace, relative_path)
: io::JoinPath(srcdir, workspace, "third_party", relative_path);
}
} | #include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
string DataDependencyPath() {
return io::JoinPath("tensorflow", "core", "platform", "resource_loader.h");
}
TEST(ResourceLoaderTest, FindsAndOpensFile) {
string filepath = GetDataDependencyFilepath(DataDependencyPath());
Status s = Env::Default()->FileExists(filepath);
EXPECT_TRUE(s.ok()) << "No file found at this location: " << filepath;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/resource_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/resource_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
41a0de0d-5b77-42f1-b0a6-d12353b7ed28 | cpp | tensorflow/tensorflow | status_matchers | third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc | third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc | #include "tsl/platform/status_matchers.h"
#include <ostream>
#include <string>
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace testing {
namespace internal_status {
void StatusIsMatcherCommonImpl::DescribeTo(std::ostream* os) const {
*os << "has a status code that ";
code_matcher_.DescribeTo(os);
*os << ", and has an error message that ";
message_matcher_.DescribeTo(os);
}
void StatusIsMatcherCommonImpl::DescribeNegationTo(std::ostream* os) const {
*os << "has a status code that ";
code_matcher_.DescribeNegationTo(os);
*os << ", or has an error message that ";
message_matcher_.DescribeNegationTo(os);
}
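// Matches when the status code and the error message both satisfy their
// matchers; the first mismatch streams a short explanation to
// result_listener.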
bool StatusIsMatcherCommonImpl::MatchAndExplain(
const absl::Status& status,
::testing::MatchResultListener* result_listener) const {
::testing::StringMatchResultListener inner_listener;
inner_listener.Clear();
if (!code_matcher_.MatchAndExplain(
static_cast<absl::StatusCode>(status.code()), &inner_listener)) {
*result_listener << (inner_listener.str().empty()
? "whose status code is wrong"
: "which has a status code " +
inner_listener.str());
return false;
}
if (!message_matcher_.Matches(std::string(status.message()))) {
*result_listener << "whose error message is wrong";
return false;
}
return true;
}
}
}
} | #include "tsl/platform/status_matchers.h"
#include <sstream>
#include <string>
#include <vector>
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace testing {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Matcher;
using ::testing::MatchesRegex;
using ::testing::Ne;
using ::testing::Not;
using ::testing::PrintToString;
MATCHER_P(LessThan, upper, "") {
if (arg < upper) {
*result_listener << "which is " << (upper - arg) << " less than " << upper;
return true;
}
*result_listener << "which is " << (arg - upper) << " more than " << upper;
return false;
}
template <typename T>
std::string Describe(const Matcher<T>& matcher) {
std::stringstream ss;
matcher.DescribeTo(&ss);
return ss.str();
}
template <typename T>
std::string DescribeNegation(const Matcher<T>& matcher) {
std::stringstream ss;
matcher.DescribeNegationTo(&ss);
return ss.str();
}
template <typename T, typename V>
std::string ExplainMatch(const Matcher<T>& matcher, const V& value) {
::testing::StringMatchResultListener listener;
matcher.MatchAndExplain(value, &listener);
return listener.str();
}
TEST(IsOkAndHoldsTest, MatchesValue) {
absl::StatusOr<std::string> status_or_message("Hello, world");
EXPECT_THAT(status_or_message, IsOkAndHolds("Hello, world"));
EXPECT_THAT(status_or_message, IsOkAndHolds(HasSubstr("Hello,")));
}
TEST(IsOkAndHoldsTest, MatchesContainer) {
absl::StatusOr<std::vector<std::string>> status_or_messages =
std::vector<std::string>{"Hello, world", "Hello, tf"};
EXPECT_THAT(status_or_messages,
IsOkAndHolds(ElementsAre("Hello, world", "Hello, tf")));
EXPECT_THAT(status_or_messages,
IsOkAndHolds(ElementsAre(HasSubstr("world"), HasSubstr("tf"))));
}
TEST(IsOkAndHoldsTest, DoesNotMatchStatus) {
absl::StatusOr<std::string> status_or_message =
errors::InvalidArgument("Invalid argument");
EXPECT_THAT(status_or_message, Not(IsOkAndHolds("Hello, world")));
}
TEST(IsOkAndHoldsTest, DoesNotMatchValue) {
absl::StatusOr<std::string> status_or_message("Hello, tf");
EXPECT_THAT(status_or_message, Not(IsOkAndHolds("Hello, world")));
}
TEST(IsOkAndHoldsTest, DoesNotMatchContainer) {
absl::StatusOr<std::vector<int>> status_or_container({1, 2, 3});
EXPECT_THAT(status_or_container, Not(IsOkAndHolds(ElementsAre(4, 5, 6))));
}
TEST(IsOkAndHoldsTest, DescribeExpectedValue) {
Matcher<absl::StatusOr<std::string>> is_ok_and_has_substr =
IsOkAndHolds(HasSubstr("Hello"));
EXPECT_EQ(Describe(is_ok_and_has_substr),
"is OK and has a value that has substring \"Hello\"");
EXPECT_EQ(DescribeNegation(is_ok_and_has_substr),
"isn't OK or has a value that has no substring \"Hello\"");
}
TEST(IsOkAndHoldsTest, ExplainNotMatchingStatus) {
Matcher<absl::StatusOr<int>> is_ok_and_less_than =
IsOkAndHolds(LessThan(100));
absl::StatusOr<int> status = errors::Unknown("Unknown");
EXPECT_THAT(ExplainMatch(is_ok_and_less_than, status),
HasSubstr("which has status UNKNOWN: Unknown"));
}
TEST(IsOkAndHoldsTest, ExplainNotMatchingValue) {
Matcher<absl::StatusOr<int>> is_ok_and_less_than =
IsOkAndHolds(LessThan(100));
EXPECT_EQ(ExplainMatch(is_ok_and_less_than, 120),
"which contains value 120, which is 20 more than 100");
}
TEST(IsOkAndHoldsTest, ExplainNotMatchingContainer) {
Matcher<absl::StatusOr<std::vector<int>>> is_ok_and_less_than =
IsOkAndHolds(ElementsAre(1, 2, 3));
std::vector<int> actual{4, 5, 6};
EXPECT_THAT(ExplainMatch(is_ok_and_less_than, actual),
HasSubstr("which contains value " + PrintToString(actual)));
}
TEST(StatusIsTest, MatchesOK) {
EXPECT_THAT(absl::OkStatus(), StatusIs(error::OK));
absl::StatusOr<std::string> message("Hello, world");
EXPECT_THAT(message, StatusIs(error::OK));
}
TEST(StatusIsTest, DoesNotMatchOk) {
EXPECT_THAT(errors::DeadlineExceeded("Deadline exceeded"),
Not(StatusIs(error::OK)));
absl::StatusOr<std::string> status = errors::NotFound("Not found");
EXPECT_THAT(status, Not(StatusIs(error::OK)));
}
TEST(StatusIsTest, MatchesStatus) {
absl::Status s = errors::Cancelled("Cancelled");
EXPECT_THAT(s, StatusIs(error::CANCELLED));
EXPECT_THAT(s, StatusIs(error::CANCELLED, "Cancelled"));
EXPECT_THAT(s, StatusIs(_, "Cancelled"));
EXPECT_THAT(s, StatusIs(error::CANCELLED, _));
EXPECT_THAT(s, StatusIs(Ne(error::INVALID_ARGUMENT), _));
EXPECT_THAT(s, StatusIs(error::CANCELLED, HasSubstr("Can")));
EXPECT_THAT(s, StatusIs(error::CANCELLED, MatchesRegex("Can.*")));
}
TEST(StatusIsTest, StatusOrMatchesStatus) {
absl::StatusOr<int> s = errors::InvalidArgument("Invalid Argument");
EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT, "Invalid Argument"));
EXPECT_THAT(s, StatusIs(_, "Invalid Argument"));
EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT, _));
EXPECT_THAT(s, StatusIs(Ne(error::CANCELLED), _));
EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT, HasSubstr("Argument")));
EXPECT_THAT(s, StatusIs(error::INVALID_ARGUMENT, MatchesRegex(".*Argument")));
}
TEST(StatusIsTest, DoesNotMatchStatus) {
absl::Status s = errors::Internal("Internal");
EXPECT_THAT(s, Not(StatusIs(error::FAILED_PRECONDITION)));
EXPECT_THAT(s, Not(StatusIs(error::INTERNAL, "Failed Precondition")));
EXPECT_THAT(s, Not(StatusIs(_, "Failed Precondition")));
EXPECT_THAT(s, Not(StatusIs(error::FAILED_PRECONDITION, _)));
}
TEST(StatusIsTest, StatusOrDoesNotMatchStatus) {
absl::StatusOr<int> s = errors::FailedPrecondition("Failed Precondition");
EXPECT_THAT(s, Not(StatusIs(error::INTERNAL)));
EXPECT_THAT(s, Not(StatusIs(error::FAILED_PRECONDITION, "Internal")));
EXPECT_THAT(s, Not(StatusIs(_, "Internal")));
EXPECT_THAT(s, Not(StatusIs(error::INTERNAL, _)));
}
TEST(StatusIsTest, DescribeExpectedValue) {
Matcher<absl::Status> status_is =
StatusIs(error::UNAVAILABLE, std::string("Unavailable"));
EXPECT_EQ(Describe(status_is),
"has a status code that is equal to UNAVAILABLE, "
"and has an error message that is equal to \"Unavailable\"");
}
TEST(StatusIsTest, DescribeNegatedExpectedValue) {
Matcher<absl::StatusOr<std::string>> status_is =
StatusIs(error::ABORTED, std::string("Aborted"));
EXPECT_EQ(DescribeNegation(status_is),
"has a status code that isn't equal to ABORTED, "
"or has an error message that isn't equal to \"Aborted\"");
}
TEST(StatusIsTest, ExplainNotMatchingErrorCode) {
Matcher<absl::Status> status_is = StatusIs(error::NOT_FOUND, _);
const absl::Status status = errors::AlreadyExists("Already exists");
EXPECT_EQ(ExplainMatch(status_is, status), "whose status code is wrong");
}
TEST(StatusIsTest, ExplainNotMatchingErrorMessage) {
Matcher<absl::Status> status_is = StatusIs(error::NOT_FOUND, "Not found");
const absl::Status status = errors::NotFound("Already exists");
EXPECT_EQ(ExplainMatch(status_is, status), "whose error message is wrong");
}
TEST(StatusIsTest, ExplainStatusOrNotMatchingErrorCode) {
Matcher<absl::StatusOr<int>> status_is = StatusIs(error::ALREADY_EXISTS, _);
const absl::StatusOr<int> status_or = errors::NotFound("Not found");
EXPECT_EQ(ExplainMatch(status_is, status_or), "whose status code is wrong");
}
TEST(StatusIsTest, ExplainStatusOrNotMatchingErrorMessage) {
Matcher<absl::StatusOr<int>> status_is =
StatusIs(error::ALREADY_EXISTS, "Already exists");
const absl::StatusOr<int> status_or = errors::AlreadyExists("Not found");
EXPECT_EQ(ExplainMatch(status_is, status_or), "whose error message is wrong");
}
TEST(StatusIsTest, ExplainStatusOrHasValue) {
Matcher<absl::StatusOr<int>> status_is =
StatusIs(error::RESOURCE_EXHAUSTED, "Resource exhausted");
const absl::StatusOr<int> value = -1;
EXPECT_EQ(ExplainMatch(status_is, value), "whose status code is wrong");
}
TEST(IsOkTest, MatchesOK) {
EXPECT_THAT(absl::OkStatus(), IsOk());
absl::StatusOr<std::string> message = std::string("Hello, world");
EXPECT_THAT(message, IsOk());
}
TEST(IsOkTest, DoesNotMatchOK) {
EXPECT_THAT(errors::PermissionDenied("Permission denied"), Not(IsOk()));
absl::StatusOr<std::string> status =
errors::Unauthenticated("Unauthenticated");
EXPECT_THAT(status, Not(IsOk()));
}
TEST(IsOkTest, DescribeExpectedValue) {
Matcher<absl::Status> status_is_ok = IsOk();
EXPECT_EQ(Describe(status_is_ok), "is OK");
Matcher<absl::StatusOr<std::string>> status_or_is_ok = IsOk();
EXPECT_EQ(Describe(status_or_is_ok), "is OK");
}
TEST(IsOkTest, DescribeNegatedExpectedValue) {
Matcher<absl::Status> status_is_ok = IsOk();
EXPECT_EQ(DescribeNegation(status_is_ok), "is not OK");
Matcher<absl::StatusOr<std::string>> status_or_is_ok = IsOk();
EXPECT_EQ(DescribeNegation(status_or_is_ok), "is not OK");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/status_matchers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/status_matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4daa7986-e5a3-4e28-88cf-081ff1871723 | cpp | tensorflow/tensorflow | setround | third_party/xla/third_party/tsl/tsl/platform/setround.cc | third_party/xla/third_party/tsl/tsl/platform/setround_test.cc | #include "tsl/platform/setround.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace port {
#if defined(TF_BROKEN_CFENV)
ScopedSetRound::ScopedSetRound(const int mode) : original_mode_(mode) {
DCHECK_EQ(mode, FE_TONEAREST);
}
ScopedSetRound::~ScopedSetRound() {}
#else
ScopedSetRound::ScopedSetRound(const int mode) {
original_mode_ = std::fegetround();
if (original_mode_ < 0) {
original_mode_ = FE_TONEAREST;
}
std::fesetround(mode);
}
ScopedSetRound::~ScopedSetRound() { std::fesetround(original_mode_); }
#endif
}
} | #include "tsl/platform/setround.h"
#include <cmath>
#include "tsl/platform/test.h"
#if !defined(__clang__) || !defined(__OPTIMIZE__)
namespace tsl {
namespace {
void CheckDownward() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-13, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(12, std::nearbyint(12.9));
EXPECT_EQ(-13, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckToNearest() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(13, std::nearbyint(12.9));
EXPECT_EQ(-13, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckTowardZero() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(12, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(12, std::nearbyint(12.5));
EXPECT_EQ(12, std::nearbyint(12.9));
EXPECT_EQ(-12, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
void CheckUpward() {
EXPECT_EQ(12, std::nearbyint(12.0));
EXPECT_EQ(13, std::nearbyint(12.1));
EXPECT_EQ(-12, std::nearbyint(-12.1));
EXPECT_EQ(13, std::nearbyint(12.5));
EXPECT_EQ(13, std::nearbyint(12.9));
EXPECT_EQ(-12, std::nearbyint(-12.9));
EXPECT_EQ(13, std::nearbyint(13.0));
}
TEST(SetScopedSetRound, Downward) {
port::ScopedSetRound round(FE_DOWNWARD);
CheckDownward();
}
TEST(SetScopedSetRound, ToNearest) {
port::ScopedSetRound round(FE_TONEAREST);
CheckToNearest();
}
TEST(SetScopedSetRound, TowardZero) {
port::ScopedSetRound round(FE_TOWARDZERO);
CheckTowardZero();
}
TEST(SetScopedSetRound, Upward) {
port::ScopedSetRound round(FE_UPWARD);
CheckUpward();
}
TEST(SetScopedSetRound, Scoped) {
std::fesetround(FE_TONEAREST);
CheckToNearest();
{
port::ScopedSetRound round(FE_UPWARD);
CheckUpward();
}
CheckToNearest();
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/setround.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/setround_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38a634d6-0df3-4df7-8bf8-cc745ad43372 | cpp | tensorflow/tensorflow | abi | third_party/xla/third_party/tsl/tsl/platform/abi.cc | third_party/xla/third_party/tsl/tsl/platform/abi_test.cc | #include "tsl/platform/abi.h"
#include "tsl/platform/types.h"
#if defined(_MSC_VER)
#include <windows.h>
#include <cstring>
#else
#include <cxxabi.h>
#include <cstdlib>
#endif
#include <memory>
#include <string>
#if defined(_MSC_VER)
extern "C" char* __unDName(char* output_string, const char* name,
int max_string_length, void* (*p_alloc)(std::size_t),
void (*p_free)(void*), unsigned short disable_flags);
#endif
namespace tsl {
namespace port {
string MaybeAbiDemangle(const char* name) {
#if defined(_MSC_VER)
std::unique_ptr<char> demangled{__unDName(nullptr, name, 0, std::malloc,
std::free,
static_cast<unsigned short>(0))};
return string(demangled.get() != nullptr ? demangled.get() : name);
#else
int status = 0;
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
return (status == 0) ? res.get() : name;
#endif
}
}
} | #include "tsl/platform/abi.h"
#include <typeinfo>
#include "tsl/platform/test.h"
namespace tsl {
struct MyRandomPODType {};
TEST(AbiTest, AbiDemangleTest) {
EXPECT_EQ(port::MaybeAbiDemangle(typeid(int).name()), "int");
#ifdef PLATFORM_WINDOWS
const char pod_type_name[] = "struct tsl::MyRandomPODType";
#else
const char pod_type_name[] = "tsl::MyRandomPODType";
#endif
EXPECT_EQ(port::MaybeAbiDemangle(typeid(MyRandomPODType).name()),
pod_type_name);
EXPECT_EQ(
port::MaybeAbiDemangle("help! i'm caught in a C++ mangle factoryasdf"),
"help! i'm caught in a C++ mangle factoryasdf");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/abi.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/abi_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7fc752cb-3193-459f-b0df-3863c1908434 | cpp | tensorflow/tensorflow | threadpool | third_party/xla/third_party/tsl/tsl/platform/threadpool.cc | tensorflow/core/lib/core/threadpool_test.cc | #include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "absl/types/optional.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/context.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/setround.h"
#include "tsl/platform/tracing.h"
#ifdef DNNL_AARCH64_USE_ACL
#include "tsl/platform/cpu_info.h"
#endif
#ifdef TENSORFLOW_THREADSCALING_EXPERIMENTAL
ABSL_FLAG(float, tensorflow_num_threads_scale_factor, 1.0,
"Allows to scale all Tensorflow ThreadPools. Total number of threads "
"in a given ThreadPool equals to num_threads * "
"tensorflow_num_threads_scale_factor. Default scale factor of 1 is a "
"no-op.");
#endif
namespace tsl {
namespace thread {
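// Glue between Eigen::ThreadPoolTempl and TSL: worker threads are created via
// Env::StartThread (flushing denormals, setting round-to-nearest and optional
// NUMA affinity), and each task carries the scheduling thread's Context plus
// a tracing id that is restored around execution.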
struct EigenEnvironment {
typedef Thread EnvThread;
struct TaskImpl {
std::function<void()> f;
Context context;
uint64 trace_id;
};
struct Task {
std::unique_ptr<TaskImpl> f;
};
Env* const env_;
const ThreadOptions thread_options_;
const string name_;
EigenEnvironment(Env* env, const ThreadOptions& thread_options,
const string& name)
: env_(env), thread_options_(thread_options), name_(name) {}
EnvThread* CreateThread(std::function<void()> f) {
return env_->StartThread(thread_options_, name_, [=]() {
port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
if (thread_options_.numa_node != port::kNUMANoAffinity) {
port::NUMASetThreadNodeAffinity(thread_options_.numa_node);
}
f();
});
}
Task CreateTask(std::function<void()> f) {
uint64 id = 0;
if (tracing::EventCollector::IsEnabled()) {
id = tracing::GetUniqueArg();
tracing::RecordEvent(tracing::EventCategory::kScheduleClosure, id);
}
return Task{
std::unique_ptr<TaskImpl>(new TaskImpl{
std::move(f),
Context(ContextKind::kThread),
id,
}),
};
}
void ExecuteTask(const Task& t) {
WithContext wc(t.f->context);
tracing::ScopedRegion region(tracing::EventCategory::kRunClosure,
t.f->trace_id);
t.f->f();
}
};
ThreadPool::ThreadPool(Env* env, const string& name, int num_threads)
: ThreadPool(env, ThreadOptions(), name, num_threads, true, nullptr) {}
ThreadPool::ThreadPool(Env* env, const ThreadOptions& thread_options,
const string& name, int num_threads)
: ThreadPool(env, thread_options, name, num_threads, true, nullptr) {}
ThreadPool::ThreadPool(Env* env, const ThreadOptions& thread_options,
const string& name, int num_threads,
bool low_latency_hint, Eigen::Allocator* allocator) {
CHECK_GE(num_threads, 1);
#ifdef DNNL_AARCH64_USE_ACL
if (num_threads == tsl::port::NumTotalCPUs() && num_threads >= 16) {
num_threads = num_threads - 1;
}
#endif
#ifdef TENSORFLOW_THREADSCALING_EXPERIMENTAL
CHECK_GT(absl::GetFlag(FLAGS_tensorflow_num_threads_scale_factor), 0);
num_threads *= absl::GetFlag(FLAGS_tensorflow_num_threads_scale_factor);
if (num_threads < 1) num_threads = 1;
#endif
eigen_threadpool_.reset(new Eigen::ThreadPoolTempl<EigenEnvironment>(
num_threads, low_latency_hint,
EigenEnvironment(env, thread_options, "tf_" + name)));
underlying_threadpool_ = eigen_threadpool_.get();
threadpool_device_.reset(new Eigen::ThreadPoolDevice(underlying_threadpool_,
num_threads, allocator));
}
ThreadPool::ThreadPool(thread::ThreadPoolInterface* user_threadpool) {
underlying_threadpool_ = user_threadpool;
threadpool_device_.reset(new Eigen::ThreadPoolDevice(
underlying_threadpool_, underlying_threadpool_->NumThreads(), nullptr));
}
ThreadPool::~ThreadPool() {}
void ThreadPool::Schedule(std::function<void()> fn) {
CHECK(fn != nullptr);
underlying_threadpool_->Schedule(std::move(fn));
}
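// Returns ceil(total / block_size), or 1 when sharding cannot help
// (non-positive block size, total <= 1, total <= block_size, or a
// single-threaded pool).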
int ThreadPool::NumShardsUsedByFixedBlockSizeScheduling(
const int64_t total, const int64_t block_size) {
if (block_size <= 0 || total <= 1 || total <= block_size ||
NumThreads() == 1) {
return 1;
}
return (total + block_size - 1) / block_size;
}
int ThreadPool::NumShardsUsedByTransformRangeConcurrently(
const int64_t block_size, const int64_t total) {
return NumShardsUsedByFixedBlockSizeScheduling(total, block_size);
}
void ThreadPool::ParallelFor(int64_t total,
const SchedulingParams& scheduling_params,
const std::function<void(int64_t, int64_t)>& fn) {
switch (scheduling_params.strategy()) {
case SchedulingStrategy::kAdaptive: {
if (scheduling_params.cost_per_unit().has_value()) {
ParallelFor(total, *scheduling_params.cost_per_unit(), fn);
}
break;
}
case SchedulingStrategy::kFixedBlockSize: {
if (scheduling_params.block_size().has_value()) {
ParallelForFixedBlockSizeScheduling(
total, *scheduling_params.block_size(), fn);
}
break;
}
}
}
void ThreadPool::TransformRangeConcurrently(
const int64_t block_size, const int64_t total,
const std::function<void(int64_t, int64_t)>& fn) {
ParallelFor(total,
SchedulingParams(SchedulingStrategy::kFixedBlockSize,
absl::nullopt , block_size),
fn);
}
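// Fixed-block-size sharding: handle_range repeatedly splits the remaining
// range at a block boundary, scheduling the upper half on the pool and
// keeping the lower half, until a piece spans at most one block and fn is
// invoked on it; the BlockingCounter waits for all shards to finish.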
void ThreadPool::ParallelForFixedBlockSizeScheduling(
const int64_t total, const int64_t block_size,
const std::function<void(int64_t, int64_t)>& fn) {
const int num_shards_used =
NumShardsUsedByFixedBlockSizeScheduling(total, block_size);
if (num_shards_used == 1) {
fn(0, total);
return;
}
BlockingCounter counter(num_shards_used);
std::function<void(int64_t, int64_t)> handle_range =
[=, &handle_range, &counter, &fn](int64_t first, int64_t last) {
while (last - first > block_size) {
const int64_t mid = first + ((last - first) / 2 + block_size - 1) /
block_size * block_size;
Schedule([=, &handle_range]() { handle_range(mid, last); });
last = mid;
}
fn(first, last);
counter.DecrementCount();
};
if (num_shards_used <= NumThreads()) {
handle_range(0, total);
} else {
Schedule([=, &handle_range]() { handle_range(0, total); });
}
counter.Wait();
}
void ThreadPool::ParallelFor(int64_t total, int64_t cost_per_unit,
const std::function<void(int64_t, int64_t)>& fn) {
CHECK_GE(total, 0);
CHECK_EQ(total, (int64_t)(Eigen::Index)total);
threadpool_device_->parallelFor(
total, Eigen::TensorOpCost(0, 0, cost_per_unit),
[&fn](Eigen::Index first, Eigen::Index last) { fn(first, last); });
}
void ThreadPool::ParallelForWithWorkerId(
int64_t total, int64_t cost_per_unit,
const std::function<void(int64_t, int64_t, int)>& fn) {
CHECK_GE(total, 0);
CHECK_EQ(total, (int64_t)(Eigen::Index)total);
threadpool_device_->parallelFor(total,
Eigen::TensorOpCost(0, 0, cost_per_unit),
[this, &fn](int64_t start, int64_t limit) {
int id = CurrentThreadId() + 1;
fn(start, limit, id);
});
}
void ThreadPool::ParallelForWithWorkerId(
int64_t total, const SchedulingParams& scheduling_params,
const std::function<void(int64_t, int64_t, int)>& fn) {
ParallelFor(total, scheduling_params,
[this, &fn](int64_t start, int64_t limit) {
int id = CurrentThreadId() + 1;
fn(start, limit, id);
});
}
int ThreadPool::NumThreads() const {
return underlying_threadpool_->NumThreads();
}
int ThreadPool::CurrentThreadId() const {
return underlying_threadpool_->CurrentThreadId();
}
void ThreadPool::ScheduleWithHint(std::function<void()> fn, int start,
int limit) {
underlying_threadpool_->ScheduleWithHint(std::move(fn), start, limit);
}
void ThreadPool::SetStealPartitions(
const std::vector<std::pair<unsigned, unsigned>>& partitions) {
DCHECK(eigen_threadpool_ != nullptr);
eigen_threadpool_->SetStealPartitions(partitions);
}
Eigen::ThreadPoolInterface* ThreadPool::AsEigenThreadPool() const {
DCHECK(underlying_threadpool_ != nullptr);
return underlying_threadpool_;
}
}
} | #include "tensorflow/core/lib/core/threadpool.h"
#include <atomic>
#include <optional>
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace thread {
static const int kNumThreads = 30;
TEST(ThreadPool, Empty) {
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
ThreadPool pool(Env::Default(), "test", num_threads);
}
}
TEST(ThreadPool, DoWork) {
Context outer_context(ContextKind::kThread);
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
{
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
pool.Schedule([&outer_context, &work, i]() {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
ASSERT_FALSE(work[i].exchange(true));
});
}
}
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
void RunWithFixedBlockSize(int64_t block_size, int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
threads->ParallelFor(
total,
ThreadPool::SchedulingParams(
ThreadPool::SchedulingStrategy::kFixedBlockSize ,
std::nullopt , block_size ),
[=, &mu, &num_shards, &num_done_work, &work](int64_t start, int64_t end) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
mutex_lock l(mu);
++num_shards;
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
ASSERT_TRUE(work[i]);
}
const int64_t num_workers = (total + block_size - 1) / block_size;
if (num_workers < threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + num_workers);
}
}
TEST(ThreadPoolTest, ParallelForFixedBlockSizeScheduling) {
ThreadPool threads(Env::Default(), "test", 16);
for (auto block_size : {1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto diff : {0, 1, 11, 102, 1003, 10005, 1000007}) {
const int64_t total = block_size + diff;
RunWithFixedBlockSize(block_size, total, &threads);
}
}
}
void RunWithFixedBlockSizeTransformRangeConcurrently(int64_t block_size,
int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
threads->TransformRangeConcurrently(
block_size, total,
[=, &mu, &num_shards, &num_done_work, &work](int64_t start, int64_t end) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
mutex_lock l(mu);
++num_shards;
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
ASSERT_TRUE(work[i]);
}
const int64_t num_workers = (total + block_size - 1) / block_size;
if (num_workers < threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + num_workers);
}
}
TEST(ThreadPoolTest, TransformRangeConcurrently) {
ThreadPool threads(Env::Default(), "test", 16);
for (auto block_size : {1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto diff : {0, 1, 11, 102, 1003, 10005, 1000007}) {
const int64_t total = block_size + diff;
RunWithFixedBlockSizeTransformRangeConcurrently(block_size, total,
&threads);
}
}
}
TEST(ThreadPoolTest, NumShardsUsedByFixedBlockSizeScheduling) {
ThreadPool threads(Env::Default(), "test", 16);
EXPECT_EQ(1, threads.NumShardsUsedByFixedBlockSizeScheduling(
3 , 3 ));
EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(
4 , 3 ));
EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(
5 , 3 ));
EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(
6 , 3 ));
EXPECT_EQ(3, threads.NumShardsUsedByFixedBlockSizeScheduling(
7 , 3 ));
EXPECT_EQ(7, threads.NumShardsUsedByFixedBlockSizeScheduling(
7 , 1 ));
EXPECT_EQ(1, threads.NumShardsUsedByFixedBlockSizeScheduling(
7 , 0 ));
}
TEST(ThreadPoolTest, NumShardsUsedByTransformRangeConcurrently) {
ThreadPool threads(Env::Default(), "test", 16);
EXPECT_EQ(1, threads.NumShardsUsedByTransformRangeConcurrently(
3 , 3 ));
EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(
3 , 4 ));
EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(
3 , 5 ));
EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(
3 , 6 ));
EXPECT_EQ(3, threads.NumShardsUsedByTransformRangeConcurrently(
3 , 7 ));
EXPECT_EQ(7, threads.NumShardsUsedByTransformRangeConcurrently(
1 , 7 ));
EXPECT_EQ(1, threads.NumShardsUsedByTransformRangeConcurrently(
0 , 7 ));
}
void RunFixedBlockSizeShardingWithWorkerId(int64_t block_size, int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
const int64_t num_threads = threads->NumThreads();
std::vector<std::atomic<bool>> threads_running(num_threads + 1);
for (int i = 0; i < num_threads + 1; i++) {
threads_running[i] = false;
}
threads->ParallelForWithWorkerId(
total,
ThreadPool::SchedulingParams(
ThreadPool::SchedulingStrategy::kFixedBlockSize ,
std::nullopt , block_size ),
[=, &mu, &num_done_work, &work, &threads_running](int64_t start,
int64_t end, int id) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
EXPECT_GE(id, 0);
EXPECT_LE(id, num_threads);
EXPECT_FALSE(threads_running[id].exchange(true));
mutex_lock l(mu);
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
EXPECT_TRUE(threads_running[id].exchange(false));
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
EXPECT_TRUE(work[i]);
}
}
TEST(ThreadPoolTest, ParallelForFixedBlockSizeSchedulingWithWorkerId) {
for (int32_t num_threads : {1, 2, 3, 9, 16, 31}) {
ThreadPool threads(Env::Default(), "test", num_threads);
for (int64_t block_size : {1, 7, 10, 64, 100, 256, 1000}) {
for (int64_t diff : {0, 1, 11, 102, 1003}) {
const int64_t total = block_size + diff;
RunFixedBlockSizeShardingWithWorkerId(block_size, total, &threads);
}
}
}
}
TEST(ThreadPool, ParallelFor) {
Context outer_context(ContextKind::kThread);
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
pool.ParallelFor(kWorkItems, kHugeCost,
[&outer_context, &work](int64_t begin, int64_t end) {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
TEST(ThreadPool, ParallelForWithAdaptiveSchedulingStrategy) {
Context outer_context(ContextKind::kThread);
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
pool.ParallelFor(
kWorkItems,
ThreadPool::SchedulingParams(
ThreadPool::SchedulingStrategy::kAdaptive ,
kHugeCost , std::nullopt ),
[&outer_context, &work](int64_t begin, int64_t end) {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
TEST(ThreadPool, ParallelForWithWorkerId) {
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
std::atomic<bool> threads_running[kNumThreads + 1];
for (int i = 0; i < num_threads + 1; i++) {
threads_running[i] = false;
}
pool.ParallelForWithWorkerId(
kWorkItems, kHugeCost,
[&threads_running, &work](int64_t begin, int64_t end, int64_t id) {
ASSERT_LE(0, id);
ASSERT_LE(id, kNumThreads);
ASSERT_FALSE(threads_running[id].exchange(true));
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
ASSERT_TRUE(threads_running[id].exchange(false));
threads_running[id] = false;
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
for (int i = 0; i < num_threads + 1; i++) {
ASSERT_FALSE(threads_running[i]);
}
}
}
TEST(ThreadPool, Parallelism) {
ThreadPool pool(Env::Default(), "test", kNumThreads);
for (int iter = 0; iter < 2000; iter++) {
absl::Barrier barrier(kNumThreads);
absl::BlockingCounter counter(kNumThreads);
for (int t = 0; t < kNumThreads; ++t) {
pool.Schedule([&]() {
barrier.Block();
counter.DecrementCount();
});
}
counter.Wait();
}
}
static void BM_Sequential(::testing::benchmark::State& state) {
for (auto s : state) {
state.PauseTiming();
ThreadPool pool(Env::Default(), "test", kNumThreads);
int count = state.range(0);
mutex done_lock;
bool done_flag = false;
std::function<void()> work = [&pool, &count, &done_lock, &done_flag,
&work]() {
if (count--) {
pool.Schedule(work);
} else {
mutex_lock l(done_lock);
done_flag = true;
}
};
state.ResumeTiming();
work();
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
}
BENCHMARK(BM_Sequential)->Arg(200)->Arg(300);
static void BM_Parallel(::testing::benchmark::State& state) {
ThreadPool pool(Env::Default(), "test", kNumThreads);
std::atomic_int_fast32_t count(state.max_iterations);
mutex done_lock;
bool done_flag = false;
for (auto s : state) {
pool.Schedule([&count, &done_lock, &done_flag]() {
if (count.fetch_sub(1) == 1) {
mutex_lock l(done_lock);
done_flag = true;
}
});
}
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
BENCHMARK(BM_Parallel);
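// Measures ParallelFor over the (total, cost_per_unit) pairs registered
// below via ArgPair.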
static void BM_ParallelFor(::testing::benchmark::State& state) {
int total = state.range(0);
int cost_per_unit = state.range(1);
ThreadPool pool(Env::Default(), "test", kNumThreads);
std::atomic_int_fast32_t count(state.max_iterations);
mutex done_lock;
bool done_flag = false;
for (auto s : state) {
pool.ParallelFor(
total, cost_per_unit,
[&count, &done_lock, &done_flag](int64_t begin, int64_t end) {
for (int64_t i = begin; i < end; ++i) {
if (count.fetch_sub(1) == 1) {
mutex_lock l(done_lock);
done_flag = true;
}
}
});
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
}
BENCHMARK(BM_ParallelFor)
->ArgPair(1 << 10, 1)
->ArgPair(1 << 20, 1)
->ArgPair(1 << 10, 1 << 10)
->ArgPair(1 << 20, 1 << 10)
->ArgPair(1 << 10, 1 << 20)
->ArgPair(1 << 20, 1 << 20)
->ArgPair(1 << 10, 1 << 30)
->ArgPair(1 << 20, 1 << 30);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/threadpool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
26622fdc-cf97-44a2-8a8a-26e4c1b2bad4 | cpp | tensorflow/tensorflow | numbers | third_party/xla/third_party/tsl/tsl/platform/numbers.cc | third_party/xla/third_party/tsl/tsl/platform/numbers_test.cc | #include "tsl/platform/numbers.h"
#include <ctype.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <locale>
#include <unordered_map>
#include "double-conversion/double-conversion.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
template <typename T>
const std::unordered_map<std::string, T>* GetSpecialNumsSingleton() {
static const std::unordered_map<std::string, T>* special_nums =
CHECK_NOTNULL((new const std::unordered_map<std::string, T>{
{"inf", std::numeric_limits<T>::infinity()},
{"+inf", std::numeric_limits<T>::infinity()},
{"-inf", -std::numeric_limits<T>::infinity()},
{"infinity", std::numeric_limits<T>::infinity()},
{"+infinity", std::numeric_limits<T>::infinity()},
{"-infinity", -std::numeric_limits<T>::infinity()},
{"nan", std::numeric_limits<T>::quiet_NaN()},
{"+nan", std::numeric_limits<T>::quiet_NaN()},
{"-nan", -std::numeric_limits<T>::quiet_NaN()},
}));
return special_nums;
}
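// Locale-independent numeric parsing: special-cases inf/nan spellings and
// 0x-prefixed integers, and converts overflow during stream extraction into
// +/-infinity instead of a parse failure.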
template <typename T>
T locale_independent_strtonum(const char* str, const char** endptr) {
auto special_nums = GetSpecialNumsSingleton<T>();
std::stringstream s(str);
std::string special_num_str;
s >> special_num_str;
for (size_t i = 0; i < special_num_str.length(); ++i) {
special_num_str[i] =
std::tolower(special_num_str[i], std::locale::classic());
}
auto entry = special_nums->find(special_num_str);
if (entry != special_nums->end()) {
*endptr = str + (s.eof() ? static_cast<std::iostream::pos_type>(strlen(str))
: s.tellg());
return entry->second;
} else {
if (special_num_str.compare(0, 2, "0x") == 0 ||
special_num_str.compare(0, 3, "-0x") == 0) {
return strtol(str, const_cast<char**>(endptr), 16);
}
}
s.str(str);
s.clear();
s.imbue(std::locale::classic());
T result;
s >> result;
if (s.fail()) {
if (result == std::numeric_limits<T>::max() ||
result == std::numeric_limits<T>::infinity()) {
result = std::numeric_limits<T>::infinity();
s.clear(s.rdstate() & ~std::ios::failbit);
} else if (result == -std::numeric_limits<T>::max() ||
result == -std::numeric_limits<T>::infinity()) {
result = -std::numeric_limits<T>::infinity();
s.clear(s.rdstate() & ~std::ios::failbit);
}
}
if (endptr) {
*endptr =
str +
(s.fail() ? static_cast<std::iostream::pos_type>(0)
: (s.eof() ? static_cast<std::iostream::pos_type>(strlen(str))
: s.tellg()));
}
return result;
}
static inline const double_conversion::StringToDoubleConverter&
StringToFloatConverter() {
static const double_conversion::StringToDoubleConverter converter(
double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
double_conversion::StringToDoubleConverter::ALLOW_HEX |
double_conversion::StringToDoubleConverter::ALLOW_TRAILING_SPACES |
double_conversion::StringToDoubleConverter::ALLOW_CASE_INSENSIBILITY,
0., 0., "inf", "nan");
return converter;
}
}
namespace strings {
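// Fast integer-to-decimal printers: digits are written in reverse into the
// caller-provided buffer and then reversed in place; the return value is the
// number of characters written, excluding the trailing NUL.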
size_t FastInt32ToBufferLeft(int32_t i, char* buffer) {
uint32_t u = i;
size_t length = 0;
if (i < 0) {
*buffer++ = '-';
++length;
u = 0 - u;
}
length += FastUInt32ToBufferLeft(u, buffer);
return length;
}
size_t FastUInt32ToBufferLeft(uint32_t i, char* buffer) {
char* start = buffer;
do {
*buffer++ = ((i % 10) + '0');
i /= 10;
} while (i > 0);
*buffer = 0;
std::reverse(start, buffer);
return buffer - start;
}
size_t FastInt64ToBufferLeft(int64_t i, char* buffer) {
uint64_t u = i;
size_t length = 0;
if (i < 0) {
*buffer++ = '-';
++length;
u = 0 - u;
}
length += FastUInt64ToBufferLeft(u, buffer);
return length;
}
size_t FastUInt64ToBufferLeft(uint64_t i, char* buffer) {
char* start = buffer;
do {
*buffer++ = ((i % 10) + '0');
i /= 10;
} while (i > 0);
*buffer = 0;
std::reverse(start, buffer);
return buffer - start;
}
static const double kDoublePrecisionCheckMax = DBL_MAX / 1.000000000000001;
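// Emits the shortest decimal form that round-trips: DBL_DIG digits are tried
// first, with a fallback to DBL_DIG + 2 digits when re-parsing the short form
// does not recover the original value.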
size_t DoubleToBuffer(double value, char* buffer) {
static_assert(DBL_DIG < 20, "DBL_DIG is too big");
if (std::isnan(value)) {
int snprintf_result = snprintf(buffer, kFastToBufferSize, "%snan",
std::signbit(value) ? "-" : "");
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
if (std::abs(value) <= kDoublePrecisionCheckMax) {
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", DBL_DIG, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
if (locale_independent_strtonum<double>(buffer, nullptr) == value) {
return snprintf_result;
}
}
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", DBL_DIG + 2, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
namespace {
char SafeFirstChar(absl::string_view str) {
if (str.empty()) return '\0';
return str[0];
}
void SkipSpaces(absl::string_view* str) {
while (isspace(SafeFirstChar(*str))) str->remove_prefix(1);
}
}
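// Overflow-checked decimal parser for int64; leading and trailing whitespace
// is accepted, any other leftover characters cause the parse to fail.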
bool safe_strto64(absl::string_view str, int64_t* value) {
SkipSpaces(&str);
int64_t vlimit = kint64max;
int sign = 1;
if (absl::ConsumePrefix(&str, "-")) {
sign = -1;
vlimit = kint64min;
}
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
if (sign == 1) {
do {
int digit = SafeFirstChar(str) - '0';
if ((vlimit - digit) / 10 < result) {
return false;
}
result = result * 10 + digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
} else {
do {
int digit = SafeFirstChar(str) - '0';
if ((vlimit + digit) / 10 > result) {
return false;
}
result = result * 10 - digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
}
SkipSpaces(&str);
if (!str.empty()) return false;
*value = result;
return true;
}
bool safe_strtou64(absl::string_view str, uint64_t* value) {
SkipSpaces(&str);
if (!isdigit(SafeFirstChar(str))) return false;
uint64_t result = 0;
do {
int digit = SafeFirstChar(str) - '0';
if ((kuint64max - digit) / 10 < result) {
return false;
}
result = result * 10 + digit;
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = result;
return true;
}
bool safe_strto32(absl::string_view str, int32_t* value) {
SkipSpaces(&str);
int64_t vmax = kint32max;
int sign = 1;
if (absl::ConsumePrefix(&str, "-")) {
sign = -1;
++vmax;
}
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
do {
result = result * 10 + SafeFirstChar(str) - '0';
if (result > vmax) {
return false;
}
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = static_cast<int32_t>(result * sign);
return true;
}
bool safe_strtou32(absl::string_view str, uint32_t* value) {
SkipSpaces(&str);
if (!isdigit(SafeFirstChar(str))) return false;
int64_t result = 0;
do {
result = result * 10 + SafeFirstChar(str) - '0';
if (result > kuint32max) {
return false;
}
str.remove_prefix(1);
} while (isdigit(SafeFirstChar(str)));
SkipSpaces(&str);
if (!str.empty()) return false;
*value = static_cast<uint32_t>(result);
return true;
}
bool safe_strtof(absl::string_view str, float* value) {
int processed_characters_count = -1;
auto len = str.size();
if (len >= kFastToBufferSize) return false;
if (len > std::numeric_limits<int>::max()) return false;
*value = StringToFloatConverter().StringToFloat(
str.data(), static_cast<int>(len), &processed_characters_count);
return processed_characters_count > 0;
}
bool safe_strtod(absl::string_view str, double* value) {
int processed_characters_count = -1;
auto len = str.size();
if (len >= kFastToBufferSize) return false;
if (len > std::numeric_limits<int>::max()) return false;
*value = StringToFloatConverter().StringToDouble(
str.data(), static_cast<int>(len), &processed_characters_count);
return processed_characters_count > 0;
}
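// Same round-trip strategy as DoubleToBuffer: FLT_DIG digits first, then
// FLT_DIG + 3 digits if the short form does not parse back to the input.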
size_t FloatToBuffer(float value, char* buffer) {
static_assert(FLT_DIG < 10, "FLT_DIG is too big");
if (std::isnan(value)) {
int snprintf_result = snprintf(buffer, kFastToBufferSize, "%snan",
std::signbit(value) ? "-" : "");
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
return snprintf_result;
}
int snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", FLT_DIG, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
float parsed_value;
if (!safe_strtof(buffer, &parsed_value) || parsed_value != value) {
snprintf_result =
snprintf(buffer, kFastToBufferSize, "%.*g", FLT_DIG + 3, value);
DCHECK(snprintf_result > 0 && snprintf_result < kFastToBufferSize);
}
return snprintf_result;
}
std::string FpToString(Fprint fp) {
char buf[17];
snprintf(buf, sizeof(buf), "%016llx", static_cast<long long>(fp));
return std::string(buf);
}
bool StringToFp(const std::string& s, Fprint* fp) {
char junk;
uint64_t result;
if (sscanf(s.c_str(), "%" SCNx64 "%c", &result, &junk) == 1) {
*fp = result;
return true;
} else {
return false;
}
}
absl::string_view Uint64ToHexString(uint64_t v, char* buf) {
static const char* hexdigits = "0123456789abcdef";
const int num_byte = 16;
buf[num_byte] = '\0';
for (int i = num_byte - 1; i >= 0; i--) {
buf[i] = hexdigits[v & 0xf];
v >>= 4;
}
return absl::string_view(buf, num_byte);
}
bool HexStringToUint64(const absl::string_view& s, uint64_t* result) {
uint64_t v = 0;
if (s.empty()) {
return false;
}
for (size_t i = 0; i < s.size(); i++) {
char c = s[i];
if (c >= '0' && c <= '9') {
v = (v << 4) + (c - '0');
} else if (c >= 'a' && c <= 'f') {
v = (v << 4) + 10 + (c - 'a');
} else if (c >= 'A' && c <= 'F') {
v = (v << 4) + 10 + (c - 'A');
} else {
return false;
}
}
*result = v;
return true;
}
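// Formats counts with k/M/B/T suffixes (powers of 1000); values of 1e15 and
// above fall back to scientific notation.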
std::string HumanReadableNum(int64_t value) {
std::string s;
if (value < 0) {
s += "-";
value = -value;
}
if (value < 1000) {
Appendf(&s, "%lld", static_cast<long long>(value));
} else if (value >= static_cast<int64_t>(1e15)) {
Appendf(&s, "%0.3G", static_cast<double>(value));
} else {
static const char units[] = "kMBT";
const char* unit = units;
while (value >= static_cast<int64_t>(1000000)) {
value /= static_cast<int64_t>(1000);
++unit;
CHECK(unit < units + TF_ARRAYSIZE(units));
}
Appendf(&s, "%.2f%c", value / 1000.0, *unit);
}
return s;
}
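// Formats byte counts with binary KiB/MiB/GiB/... suffixes; kint64min is
// special-cased because it cannot be negated.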
std::string HumanReadableNumBytes(int64_t num_bytes) {
if (num_bytes == kint64min) {
return "-8E";
}
const char* neg_str = (num_bytes < 0) ? "-" : "";
if (num_bytes < 0) {
num_bytes = -num_bytes;
}
if (num_bytes < 1024) {
char buf[8];
snprintf(buf, sizeof(buf), "%s%lldB", neg_str,
static_cast<long long>(num_bytes));
return std::string(buf);
}
static const char units[] = "KMGTPE";
const char* unit = units;
while (num_bytes >= static_cast<int64_t>(1024) * 1024) {
num_bytes /= 1024;
++unit;
CHECK(unit < units + TF_ARRAYSIZE(units));
}
char buf[16];
snprintf(buf, sizeof(buf), ((*unit == 'K') ? "%s%.1f%ciB" : "%s%.2f%ciB"),
neg_str, num_bytes / 1024.0, *unit);
return std::string(buf);
}
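// Picks the largest unit (us, ms, s, min, h, days, months, years) that keeps
// the value readable, using average month and year lengths for the long
// units.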
std::string HumanReadableElapsedTime(double seconds) {
std::string human_readable;
if (seconds < 0) {
human_readable = "-";
seconds = -seconds;
}
const double microseconds = seconds * 1.0e6;
if (microseconds < 999.5) {
strings::Appendf(&human_readable, "%0.3g us", microseconds);
return human_readable;
}
double milliseconds = seconds * 1e3;
if (milliseconds >= .995 && milliseconds < 1) {
milliseconds = 1.0;
}
if (milliseconds < 999.5) {
strings::Appendf(&human_readable, "%0.3g ms", milliseconds);
return human_readable;
}
if (seconds < 60.0) {
strings::Appendf(&human_readable, "%0.3g s", seconds);
return human_readable;
}
seconds /= 60.0;
if (seconds < 60.0) {
strings::Appendf(&human_readable, "%0.3g min", seconds);
return human_readable;
}
seconds /= 60.0;
if (seconds < 24.0) {
strings::Appendf(&human_readable, "%0.3g h", seconds);
return human_readable;
}
seconds /= 24.0;
if (seconds < 30.0) {
strings::Appendf(&human_readable, "%0.3g days", seconds);
return human_readable;
}
if (seconds < 365.2425) {
strings::Appendf(&human_readable, "%0.3g months", seconds / 30.436875);
return human_readable;
}
seconds /= 365.2425;
strings::Appendf(&human_readable, "%0.3g years", seconds);
return human_readable;
}
}
} | #include "tsl/platform/numbers.h"
#include <cmath>
#include <string>
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
TEST(FpToString, Ints) {
for (int s = 0; s < 64; s++) {
for (int delta = -1; delta <= 1; delta++) {
uint64 fp = (1ull << s) + delta;
string s = FpToString(fp);
uint64 fp2;
EXPECT_TRUE(StringToFp(s, &fp2));
EXPECT_EQ(fp, fp2);
}
}
Fprint dummy;
EXPECT_FALSE(StringToFp("", &dummy));
EXPECT_FALSE(StringToFp("xyz", &dummy));
EXPECT_FALSE(StringToFp("0000000000000000xyz", &dummy));
}
TEST(Uint64ToHexString, Ints) {
for (int s = 0; s < 64; s++) {
for (int delta = -1; delta <= 1; delta++) {
uint64 fp = (1ull << s) + delta;
char buf[kFastToBufferSize];
absl::string_view s = Uint64ToHexString(fp, buf);
uint64 fp2;
EXPECT_TRUE(HexStringToUint64(s, &fp2));
EXPECT_EQ(fp, fp2) << s;
}
}
uint64 dummy;
EXPECT_FALSE(HexStringToUint64("", &dummy));
EXPECT_FALSE(HexStringToUint64("xyz", &dummy));
EXPECT_FALSE(HexStringToUint64("0000000000000000xyz", &dummy));
}
TEST(HumanReadableNum, Basic) {
EXPECT_EQ(HumanReadableNum(823), "823");
EXPECT_EQ(HumanReadableNum(1024), "1.02k");
EXPECT_EQ(HumanReadableNum(4000), "4.00k");
EXPECT_EQ(HumanReadableNum(999499), "999.50k");
EXPECT_EQ(HumanReadableNum(1000000), "1.00M");
EXPECT_EQ(HumanReadableNum(1048575), "1.05M");
EXPECT_EQ(HumanReadableNum(1048576), "1.05M");
EXPECT_EQ(HumanReadableNum(23956812342), "23.96B");
EXPECT_EQ(HumanReadableNum(123456789012345678), "1.23E+17");
}
TEST(HumanReadableNumBytes, Bytes) {
EXPECT_EQ("0B", HumanReadableNumBytes(0));
EXPECT_EQ("4B", HumanReadableNumBytes(4));
EXPECT_EQ("1023B", HumanReadableNumBytes(1023));
EXPECT_EQ("1.0KiB", HumanReadableNumBytes(1024));
EXPECT_EQ("1.0KiB", HumanReadableNumBytes(1025));
EXPECT_EQ("1.5KiB", HumanReadableNumBytes(1500));
EXPECT_EQ("1.9KiB", HumanReadableNumBytes(1927));
EXPECT_EQ("2.0KiB", HumanReadableNumBytes(2048));
EXPECT_EQ("1.00MiB", HumanReadableNumBytes(1 << 20));
EXPECT_EQ("11.77MiB", HumanReadableNumBytes(12345678));
EXPECT_EQ("1.00GiB", HumanReadableNumBytes(1 << 30));
EXPECT_EQ("1.00TiB", HumanReadableNumBytes(1LL << 40));
EXPECT_EQ("1.00PiB", HumanReadableNumBytes(1LL << 50));
EXPECT_EQ("1.00EiB", HumanReadableNumBytes(1LL << 60));
EXPECT_EQ("-1B", HumanReadableNumBytes(-1));
EXPECT_EQ("-4B", HumanReadableNumBytes(-4));
EXPECT_EQ("-1000B", HumanReadableNumBytes(-1000));
EXPECT_EQ("-11.77MiB", HumanReadableNumBytes(-12345678));
EXPECT_EQ("-8E", HumanReadableNumBytes(kint64min));
}
TEST(HumanReadableElapsedTime, Basic) {
EXPECT_EQ(HumanReadableElapsedTime(-10), "-10 s");
EXPECT_EQ(HumanReadableElapsedTime(-0.001), "-1 ms");
EXPECT_EQ(HumanReadableElapsedTime(-60.0), "-1 min");
EXPECT_EQ(HumanReadableElapsedTime(0.00000001), "0.01 us");
EXPECT_EQ(HumanReadableElapsedTime(0.0000012), "1.2 us");
EXPECT_EQ(HumanReadableElapsedTime(0.0012), "1.2 ms");
EXPECT_EQ(HumanReadableElapsedTime(0.12), "120 ms");
EXPECT_EQ(HumanReadableElapsedTime(1.12), "1.12 s");
EXPECT_EQ(HumanReadableElapsedTime(90.0), "1.5 min");
EXPECT_EQ(HumanReadableElapsedTime(600.0), "10 min");
EXPECT_EQ(HumanReadableElapsedTime(9000.0), "2.5 h");
EXPECT_EQ(HumanReadableElapsedTime(87480.0), "1.01 days");
EXPECT_EQ(HumanReadableElapsedTime(7776000.0), "2.96 months");
EXPECT_EQ(HumanReadableElapsedTime(78840000.0), "2.5 years");
EXPECT_EQ(HumanReadableElapsedTime(382386614.40), "12.1 years");
EXPECT_EQ(HumanReadableElapsedTime(DBL_MAX), "5.7e+300 years");
}
TEST(safe_strto32, Int32s) {
int32 result;
EXPECT_EQ(true, safe_strto32("1", &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto32("123", &result));
EXPECT_EQ(123, result);
EXPECT_EQ(true, safe_strto32(" -123 ", &result));
EXPECT_EQ(-123, result);
EXPECT_EQ(true, safe_strto32("2147483647", &result));
EXPECT_EQ(2147483647, result);
EXPECT_EQ(true, safe_strto32("-2147483648", &result));
EXPECT_EQ(-2147483648, result);
EXPECT_EQ(false, safe_strto32(" 132as ", &result));
EXPECT_EQ(false, safe_strto32(" 132.2 ", &result));
EXPECT_EQ(false, safe_strto32(" -", &result));
EXPECT_EQ(false, safe_strto32("", &result));
EXPECT_EQ(false, safe_strto32(" ", &result));
EXPECT_EQ(false, safe_strto32("123 a", &result));
EXPECT_EQ(false, safe_strto32("2147483648", &result));
EXPECT_EQ(false, safe_strto32("-2147483649", &result));
EXPECT_EQ(true, safe_strto32(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto32(absl::string_view(" -123", 4), &result));
EXPECT_EQ(-12, result);
EXPECT_EQ(false, safe_strto32(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtou32, UInt32s) {
uint32 result;
EXPECT_TRUE(safe_strtou32("0", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtou32("1", &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou32("123", &result));
EXPECT_EQ(123, result);
EXPECT_TRUE(safe_strtou32("4294967295", &result));
EXPECT_EQ(4294967295, result);
EXPECT_FALSE(safe_strtou32(" 132as ", &result));
EXPECT_FALSE(safe_strtou32(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou32(" -", &result));
EXPECT_FALSE(safe_strtou32("", &result));
EXPECT_FALSE(safe_strtou32(" ", &result));
EXPECT_FALSE(safe_strtou32("123 a", &result));
EXPECT_FALSE(safe_strtou32("123 456", &result));
EXPECT_FALSE(safe_strtou32("4294967296", &result));
EXPECT_FALSE(safe_strtou32("-1", &result));
EXPECT_TRUE(safe_strtou32(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou32(absl::string_view(" 123", 3), &result));
EXPECT_EQ(12, result);
EXPECT_FALSE(safe_strtou32(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strto64, Int64s) {
int64 result;
EXPECT_EQ(true, safe_strto64("1", &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto64("123", &result));
EXPECT_EQ(123, result);
EXPECT_EQ(true, safe_strto64(" -123 ", &result));
EXPECT_EQ(-123, result);
EXPECT_EQ(true, safe_strto64("9223372036854775807", &result));
EXPECT_EQ(9223372036854775807, result);
EXPECT_EQ(true, safe_strto64("-9223372036854775808", &result));
EXPECT_EQ(kint64min, result);
EXPECT_EQ(false, safe_strto64(" 132as ", &result));
EXPECT_EQ(false, safe_strto64(" 132.2 ", &result));
EXPECT_EQ(false, safe_strto64(" -", &result));
EXPECT_EQ(false, safe_strto64("", &result));
EXPECT_EQ(false, safe_strto64(" ", &result));
EXPECT_EQ(false, safe_strto64("123 a", &result));
EXPECT_EQ(false, safe_strto64("9223372036854775808", &result));
EXPECT_EQ(false, safe_strto64("-9223372036854775809", &result));
EXPECT_EQ(true, safe_strto64(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_EQ(true, safe_strto64(absl::string_view(" -123", 4), &result));
EXPECT_EQ(-12, result);
EXPECT_EQ(false, safe_strto64(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtou64, UInt64s) {
uint64 result;
EXPECT_TRUE(safe_strtou64("0", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtou64("1", &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou64("123", &result));
EXPECT_EQ(123, result);
EXPECT_TRUE(safe_strtou64(" 345 ", &result));
EXPECT_EQ(345, result);
EXPECT_TRUE(safe_strtou64("18446744073709551615", &result));
EXPECT_EQ(18446744073709551615UL, result);
EXPECT_FALSE(safe_strtou64(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou64(" 132.2 ", &result));
EXPECT_FALSE(safe_strtou64(" -", &result));
EXPECT_FALSE(safe_strtou64("", &result));
EXPECT_FALSE(safe_strtou64(" ", &result));
EXPECT_FALSE(safe_strtou64("123 a", &result));
EXPECT_FALSE(safe_strtou64("123 456", &result));
EXPECT_FALSE(safe_strtou64("18446744073709551616", &result));
EXPECT_FALSE(safe_strtou64("-1", &result));
EXPECT_TRUE(safe_strtou64(absl::string_view("123", 1), &result));
EXPECT_EQ(1, result);
EXPECT_TRUE(safe_strtou64(absl::string_view(" 123", 3), &result));
EXPECT_EQ(12, result);
EXPECT_FALSE(safe_strtou64(absl::string_view(nullptr, 0), &result));
}
TEST(safe_strtof, Float) {
float result = 0;
EXPECT_TRUE(safe_strtof("0.123456", &result));
EXPECT_EQ(0.123456f, result);
EXPECT_FALSE(safe_strtof("0.12345abc", &result));
EXPECT_TRUE(safe_strtof("1e39", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("-1e39", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("1e-50", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtof("0xF", &result));
EXPECT_EQ(0xF, result);
EXPECT_TRUE(safe_strtof("-0x2A", &result));
EXPECT_EQ(-42.0f, result);
EXPECT_TRUE(safe_strtof(" -0x2", &result));
EXPECT_EQ(-2.0f, result);
EXPECT_TRUE(safe_strtof("8 \t", &result));
EXPECT_EQ(8.0f, result);
EXPECT_TRUE(safe_strtof("\t20.0\t ", &result));
EXPECT_EQ(20.0f, result);
EXPECT_FALSE(safe_strtof("-infinity is awesome", &result));
char test_str[2 * kFastToBufferSize];
for (int i = 0; i < 2 * kFastToBufferSize; ++i) test_str[i] = 'a';
test_str[kFastToBufferSize + 1] = '\0';
EXPECT_FALSE(safe_strtof(test_str, &result));
EXPECT_TRUE(safe_strtof("-inf", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("+inf", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("InF", &result));
EXPECT_EQ(std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("-INF", &result));
EXPECT_EQ(-std::numeric_limits<float>::infinity(), result);
EXPECT_TRUE(safe_strtof("nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("-nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("-NaN", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtof("+NAN", &result));
EXPECT_TRUE(std::isnan(result));
}
TEST(safe_strtod, Double) {
double result = 0;
EXPECT_TRUE(safe_strtod("0.1234567890123", &result));
EXPECT_EQ(0.1234567890123, result);
EXPECT_FALSE(safe_strtod("0.1234567890123abc", &result));
char test_str[2 * kFastToBufferSize];
for (int i = 0; i < 2 * kFastToBufferSize; ++i) test_str[i] = 'a';
test_str[kFastToBufferSize + 1] = '\0';
EXPECT_FALSE(safe_strtod(test_str, &result));
EXPECT_TRUE(safe_strtod("1e310", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("-1e310", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("1e-325", &result));
EXPECT_EQ(0, result);
EXPECT_TRUE(safe_strtod(" -0x1c", &result));
EXPECT_EQ(-28.0, result);
EXPECT_TRUE(safe_strtod("50 \t", &result));
EXPECT_EQ(50.0, result);
EXPECT_TRUE(safe_strtod("\t82.0\t ", &result));
EXPECT_EQ(82.0, result);
EXPECT_FALSE(safe_strtod("infinity", &result));
EXPECT_TRUE(safe_strtod("-inf", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("+inf", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("InF", &result));
EXPECT_EQ(std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("-INF", &result));
EXPECT_EQ(-std::numeric_limits<double>::infinity(), result);
EXPECT_TRUE(safe_strtod("nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("-nan", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("-NaN", &result));
EXPECT_TRUE(std::isnan(result));
EXPECT_TRUE(safe_strtod("+NAN", &result));
EXPECT_TRUE(std::isnan(result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numbers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numbers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b22a5836-c249-40c3-848e-f8cb6f761433 | cpp | tensorflow/tensorflow | random | tensorflow/compiler/tf2xla/lib/random.cc | third_party/xla/third_party/tsl/tsl/platform/random_test.cc | #include "tensorflow/compiler/tf2xla/lib/random.h"
#include <cmath>
#include <limits>
#include "xla/client/lib/constants.h"
#include "xla/client/lib/math.h"
#include "xla/client/xla_builder.h"
#include "xla/xla_data.pb.h"
namespace tensorflow {
xla::XlaOp TruncatedNormal(xla::XlaOp uniform) {
const double kA = -2.0;
const double kB = 2.0;
const double kMu = 0.0;
const double kSigma = 1.0;
return ParameterizedTruncatedNormal(
uniform, xla::ScalarLike(uniform, kMu), xla::ScalarLike(uniform, kSigma),
xla::ScalarLike(uniform, kA), xla::ScalarLike(uniform, kB));
}
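// Inverse-CDF sampling of a normal truncated to [a, b]: interpolate the
// uniform sample between the normal CDF values at the standardized bounds,
// then map it back through the normal quantile via ErfInv.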
xla::XlaOp ParameterizedTruncatedNormal(xla::XlaOp uniform, xla::XlaOp mu,
xla::XlaOp sigma, xla::XlaOp a,
xla::XlaOp b) {
xla::XlaOp one = xla::ScalarLike(uniform, 1.0);
xla::XlaOp two = xla::ScalarLike(uniform, 2.0);
xla::XlaOp sqrt_2 = xla::ScalarLike(uniform, std::sqrt(2.0));
auto normal_cdf = [&](xla::XlaOp x) {
return (one + xla::Erf(x / sqrt_2)) / two;
};
xla::XlaOp alpha = (a - mu) / sigma;
xla::XlaOp beta = (b - mu) / sigma;
xla::XlaOp alpha_normal_cdf = normal_cdf(alpha);
xla::XlaOp beta_normal_cdf = normal_cdf(beta);
xla::XlaOp p =
alpha_normal_cdf + (beta_normal_cdf - alpha_normal_cdf) * uniform;
xla::XlaOp v = two * p - one;
xla::PrimitiveType primitive_type =
uniform.builder()->GetShape(uniform).value().element_type();
xla::XlaOp epsilon = xla::Epsilon(uniform.builder(), primitive_type);
v = xla::Clamp(-one + epsilon, v, one - epsilon);
xla::XlaOp x = mu + sigma * sqrt_2 * xla::ErfInv(v);
x = xla::Clamp(a, x, b);
return x;
}
} | #include "tsl/platform/random.h"
#include <set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
TEST(New64Test, SanityCheck) {
std::set<uint64> values;
for (int i = 0; i < 1000000; i++) {
uint64 x = New64();
EXPECT_TRUE(values.insert(x).second) << "duplicate " << x;
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/lib/random.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/random_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19afc211-8c06-4f02-9ab1-ee2b82b6cba1 | cpp | tensorflow/tensorflow | strcat | third_party/xla/third_party/tsl/tsl/platform/strcat.cc | third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc | #include "tsl/platform/strcat.h"
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include "absl/meta/type_traits.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace strings {
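// Renders the value as lowercase hex; the mask carries the requested
// zero-padding width, so at least that many digits are emitted.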
AlphaNum::AlphaNum(Hex hex) {
char *const end = &digits_[kFastToBufferSize];
char *writer = end;
uint64 value = hex.value;
uint64 width = hex.spec;
uint64 mask = (static_cast<uint64>(1) << (width - 1) * 4) | value;
static const char hexdigits[] = "0123456789abcdef";
do {
*--writer = hexdigits[value & 0xF];
value >>= 4;
mask >>= 4;
} while (mask != 0);
piece_ = absl::string_view(writer, end - writer);
}
static char *Append1(char *out, const AlphaNum &x) {
if (x.data() == nullptr) return out;
memcpy(out, x.data(), x.size());
return out + x.size();
}
static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() == nullptr) return out;
memcpy(out, x2.data(), x2.size());
return out + x2.size();
}
static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2,
const AlphaNum &x3, const AlphaNum &x4) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() != nullptr) {
memcpy(out, x2.data(), x2.size());
out += x2.size();
}
if (x3.data() != nullptr) {
memcpy(out, x3.data(), x3.size());
out += x3.size();
}
if (x4.data() == nullptr) return out;
memcpy(out, x4.data(), x4.size());
return out + x4.size();
}
string StrCat(const AlphaNum &a) { return string(a.data(), a.size()); }
string StrCat(const AlphaNum &a, const AlphaNum &b) {
string result(a.size() + b.size(), '\0');
char *const begin = &*result.begin();
char *out = Append2(begin, a, b);
DCHECK_EQ(out, begin + result.size());
return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
string result(a.size() + b.size() + c.size(), '\0');
char *const begin = &*result.begin();
char *out = Append2(begin, a, b);
out = Append1(out, c);
DCHECK_EQ(out, begin + result.size());
return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
const AlphaNum &d) {
string result(a.size() + b.size() + c.size() + d.size(), '\0');
char *const begin = &*result.begin();
char *out = Append4(begin, a, b, c, d);
DCHECK_EQ(out, begin + result.size());
return result;
}
namespace {
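// Resize helpers: use libc++'s __resize_default_init, when the string type
// provides it, so the buffer is not zero-filled before being overwritten.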
template <typename string_type, typename = void>
struct ResizeUninitializedTraits {
using HasMember = std::false_type;
static void Resize(string_type *s, size_t new_size) { s->resize(new_size); }
};
template <typename string_type>
struct ResizeUninitializedTraits<
string_type, absl::void_t<decltype(std::declval<string_type &>()
.__resize_default_init(237))> > {
using HasMember = std::true_type;
static void Resize(string_type *s, size_t new_size) {
s->__resize_default_init(new_size);
}
};
static inline void STLStringResizeUninitialized(string *s, size_t new_size) {
ResizeUninitializedTraits<string>::Resize(s, new_size);
}
template <typename string_type>
void STLStringReserveAmortized(string_type *s, size_t new_size) {
const size_t cap = s->capacity();
if (new_size > cap) {
s->reserve((std::max)(new_size, 2 * cap));
}
}
template <typename string_type>
void STLStringResizeUninitializedAmortized(string_type *s, size_t new_size) {
STLStringReserveAmortized(s, new_size);
STLStringResizeUninitialized(s, new_size);
}
}
namespace internal {
string CatPieces(std::initializer_list<absl::string_view> pieces) {
size_t total_size = 0;
for (const absl::string_view piece : pieces) total_size += piece.size();
string result(total_size, '\0');
char *const begin = &*result.begin();
char *out = begin;
for (const absl::string_view piece : pieces) {
const size_t this_size = piece.size();
memcpy(out, piece.data(), this_size);
out += this_size;
}
DCHECK_EQ(out, begin + result.size());
return result;
}
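// Debug-only check that an appended piece does not alias the destination
// string, since the destination buffer may be reallocated during the append.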
#define DCHECK_NO_OVERLAP(dest, src) \
DCHECK_GE(uintptr_t((src).data() - (dest).data()), uintptr_t((dest).size()))
void AppendPieces(string *result,
std::initializer_list<absl::string_view> pieces) {
size_t old_size = result->size();
size_t total_size = old_size;
for (const absl::string_view piece : pieces) {
DCHECK_NO_OVERLAP(*result, piece);
total_size += piece.size();
}
STLStringResizeUninitializedAmortized(result, total_size);
char *const begin = &*result->begin();
char *out = begin + old_size;
for (const absl::string_view piece : pieces) {
const size_t this_size = piece.size();
memcpy(out, piece.data(), this_size);
out += this_size;
}
DCHECK_EQ(out, begin + result->size());
}
}
void StrAppend(string *result, const AlphaNum &a) {
DCHECK_NO_OVERLAP(*result, a);
result->append(a.data(), a.size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(result, old_size + a.size() + b.size());
char *const begin = &*result->begin();
char *out = Append2(begin + old_size, a, b);
DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
const AlphaNum &c) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
DCHECK_NO_OVERLAP(*result, c);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(
result, old_size + a.size() + b.size() + c.size());
char *const begin = &*result->begin();
char *out = Append2(begin + old_size, a, b);
out = Append1(out, c);
DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
const AlphaNum &c, const AlphaNum &d) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
DCHECK_NO_OVERLAP(*result, c);
DCHECK_NO_OVERLAP(*result, d);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(
result, old_size + a.size() + b.size() + c.size() + d.size());
char *const begin = &*result->begin();
char *out = Append4(begin + old_size, a, b, c, d);
DCHECK_EQ(out, begin + result->size());
}
}
} | #include "tsl/platform/strcat.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#ifdef _MSC_VER
typedef ptrdiff_t ssize_t;
#endif
namespace tsl {
namespace strings {
TEST(StrCat, Ints) {
const int16_t s = -1;
const uint16 us = 2;
const int i = -3;
const unsigned int ui = 4;
const int32_t l = -5;
const uint32 ul = 6;
const int64_t ll = -7;
const uint64 ull = 8;
const ptrdiff_t ptrdiff = -9;
const size_t size = 10;
const ssize_t ssize = -11;
const intptr_t intptr = -12;
const uintptr_t uintptr = 13;
string answer;
answer = StrCat(s, us);
EXPECT_EQ(answer, "-12");
answer = StrCat(i, ui);
EXPECT_EQ(answer, "-34");
answer = StrCat(l, ul);
EXPECT_EQ(answer, "-56");
answer = StrCat(ll, ull);
EXPECT_EQ(answer, "-78");
answer = StrCat(ptrdiff, size);
EXPECT_EQ(answer, "-910");
answer = StrCat(ssize, intptr);
EXPECT_EQ(answer, "-11-12");
answer = StrCat(uintptr, 0);
EXPECT_EQ(answer, "130");
}
TEST(StrCat, Floats) {
const int s = 0;
const float f = 1.5f;
const double d = 1.5;
const bfloat16 bf(1.5f);
string answer;
answer = StrCat(s, f);
EXPECT_EQ(answer, "01.5");
answer = StrCat(s, d);
EXPECT_EQ(answer, "01.5");
answer = StrCat(s, bf);
EXPECT_EQ(answer, "01.5");
}
TEST(StrCat, Nulls) {
string result;
absl::string_view v;
string strs[] = {"Hello", "Cruel", "World"};
result = StrCat(v);
EXPECT_EQ(result, "");
result = StrCat(strs[0], v);
EXPECT_EQ(result, "Hello");
result = StrCat(v, strs[0]);
EXPECT_EQ(result, "Hello");
result = StrCat(v, strs[0], strs[1]);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(strs[0], v, strs[1]);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(strs[0], strs[1], v);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(v, strs[0], strs[1], strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], v, strs[1], strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], strs[1], v, strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], strs[1], strs[2], v);
EXPECT_EQ(result, "HelloCruelWorld");
}
TEST(StrCat, Basics) {
string result;
string strs[] = {"Hello", "Cruel", "World"};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char *c_strs[] = {"Hello", "Cruel", "World"};
int32 i32s[] = {'H', 'C', 'W'};
uint64 ui64s[] = {12345678910LL, 10987654321LL};
result = StrCat(false, true, 2, 3);
EXPECT_EQ(result, "0123");
result = StrCat(-1);
EXPECT_EQ(result, "-1");
result = StrCat(0.5);
EXPECT_EQ(result, "0.5");
result = StrCat(strs[1], pieces[2]);
EXPECT_EQ(result, "CruelWorld");
result = StrCat(strs[0], ", ", pieces[2]);
EXPECT_EQ(result, "Hello, World");
result = StrCat(strs[0], ", ", strs[1], " ", strs[2], "!");
EXPECT_EQ(result, "Hello, Cruel World!");
result = StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result, "ASCII 72, 67 87!");
result = StrCat(ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result, "12345678910, 10987654321!");
string one = "1";
result = StrCat("And a ", one.size(), " and a ", &result[2] - &result[0],
" and a ", one, " 2 3 4", "!");
EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!");
result = StrCat("To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33");
float f = 100000.5;
result = StrCat("A hundred K and a half is ", f);
EXPECT_EQ(result, "A hundred K and a half is 100000.5");
double d = f;
d *= d;
result = StrCat("A hundred K and a half squared is ", d);
EXPECT_EQ(result, "A hundred K and a half squared is 10000100000.25");
result = StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999);
EXPECT_EQ(result, "12333444455555666666777777788888888999999999");
}
TEST(StrCat, MaxArgs) {
string result;
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a");
EXPECT_EQ(result, "123456789a");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b");
EXPECT_EQ(result, "123456789ab");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c");
EXPECT_EQ(result, "123456789abc");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d");
EXPECT_EQ(result, "123456789abcd");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e");
EXPECT_EQ(result, "123456789abcde");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f");
EXPECT_EQ(result, "123456789abcdef");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g");
EXPECT_EQ(result, "123456789abcdefg");
result =
StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h");
EXPECT_EQ(result, "123456789abcdefgh");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i");
EXPECT_EQ(result, "123456789abcdefghi");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j");
EXPECT_EQ(result, "123456789abcdefghij");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k");
EXPECT_EQ(result, "123456789abcdefghijk");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l");
EXPECT_EQ(result, "123456789abcdefghijkl");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m");
EXPECT_EQ(result, "123456789abcdefghijklm");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n");
EXPECT_EQ(result, "123456789abcdefghijklmn");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o");
EXPECT_EQ(result, "123456789abcdefghijklmno");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p");
EXPECT_EQ(result, "123456789abcdefghijklmnop");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q");
EXPECT_EQ(result, "123456789abcdefghijklmnopq");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z");
EXPECT_EQ(result,
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ");
}
TEST(StrAppend, Basics) {
string result = "existing text";
string strs[] = {"Hello", "Cruel", "World"};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char *c_strs[] = {"Hello", "Cruel", "World"};
int32 i32s[] = {'H', 'C', 'W'};
uint64 ui64s[] = {12345678910LL, 10987654321LL};
string::size_type old_size = result.size();
StrAppend(&result, strs[0]);
EXPECT_EQ(result.substr(old_size), "Hello");
old_size = result.size();
StrAppend(&result, strs[1], pieces[2]);
EXPECT_EQ(result.substr(old_size), "CruelWorld");
old_size = result.size();
StrAppend(&result, strs[0], ", ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "Hello, World");
old_size = result.size();
StrAppend(&result, strs[0], ", ", strs[1], " ", strs[2], "!");
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World!");
old_size = result.size();
StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!");
old_size = result.size();
StrAppend(&result, ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!");
string one = "1";
old_size = result.size();
StrAppend(&result, "And a ", one.size(), " and a ", &result[2] - &result[0],
" and a ", one, " 2 3 4", "!");
EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!");
old_size = result.size();
StrAppend(&result,
"To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result.substr(old_size),
"To output a char by ASCII/numeric value, use +: 33");
float f = 100000.5;
old_size = result.size();
StrAppend(&result, "A hundred K and a half is ", f);
EXPECT_EQ(result.substr(old_size), "A hundred K and a half is 100000.5");
double d = f;
d *= d;
old_size = result.size();
StrAppend(&result, "A hundred K and a half squared is ", d);
EXPECT_EQ(result.substr(old_size),
"A hundred K and a half squared is 10000100000.25");
old_size = result.size();
StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9);
EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889");
old_size = result.size();
StrAppend(&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e",
"f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E",
"F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R",
"S", "T", "U", "V", "W", "X", "Y", "Z",
"No limit thanks to C++11's variadic templates");
EXPECT_EQ(result.substr(old_size),
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
"No limit thanks to C++11's variadic templates");
}
TEST(StrAppend, Death) {
string s = "self";
EXPECT_DEBUG_DEATH(StrAppend(&s, s.c_str() + 1), "Check failed:");
EXPECT_DEBUG_DEATH(StrAppend(&s, s), "Check failed:");
}
static void CheckHex64(uint64 v) {
string actual = StrCat(Hex(v, kZeroPad16));
string expected = Printf("%016llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v, kZeroPad8));
expected = Printf("%08llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void CheckHex32(uint32 v) {
string actual = StrCat(Hex(v, kZeroPad8));
string expected = Printf("%08x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void CheckHexSigned32(int32_t v) {
string actual = StrCat(Hex(v, kZeroPad8));
string expected = Printf("%08x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void TestFastPrints() {
for (int i = 0; i < 10000; i++) {
CheckHex64(i);
CheckHex32(i);
CheckHexSigned32(i);
CheckHexSigned32(-i);
}
CheckHex64(0x123456789abcdef0ull);
CheckHex32(0x12345678);
int8_t minus_one_8bit = -1;
EXPECT_EQ("ff", StrCat(Hex(minus_one_8bit)));
int16_t minus_one_16bit = -1;
EXPECT_EQ("ffff", StrCat(Hex(minus_one_16bit)));
}
TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |